code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
def is_palindrome(num: int) -> bool:
    """Return True if the decimal representation of *num* is a palindrome."""
    return str(num) == str(num)[::-1]


def sum_reverse(num: int) -> int:
    """Return *num* plus the number obtained by reversing its digits."""
    return int(num) + int(str(num)[::-1])


def solution(limit: int = 10000) -> int:
    """Count Lychrel candidates below *limit* (Project Euler 55).

    A number is treated as Lychrel if 50 reverse-and-add iterations
    never produce a palindrome.
    """
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        current = num
        while iterations < 50:
            current = sum_reverse(current)
            iterations += 1
            if is_palindrome(current):
                break
        else:
            # while-else: 50 iterations exhausted without a palindrome
            lychrel_nums.append(num)
    return len(lychrel_nums)


# Backward-compatible alias: the scrambled original exposed the last
# definition under this name.
UpperCAmelCase = solution

if __name__ == "__main__":
    print(f"{solution() = }")
| 594 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowercase(ProcessorMixin):
    """Processor wrapping a BLIP image processor and a BERT tokenizer.

    A single callable that accepts images and/or text and returns a combined
    encoding (``pixel_values`` plus tokenizer outputs).
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        # BLIP never consumes token_type_ids, so switch them off on the
        # tokenizer before registering both components with ProcessorMixin.
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images=None,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_token_type_ids=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ):
        """Encode *images* and/or *text*; at least one must be provided."""
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.')

        # Text-only path: behave exactly like the tokenizer.
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # Image (and optionally text) path: add pixel_values.
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # De-duplicate while preserving order (dict.fromkeys keeps insertion order).
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 594 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import plumbing: ``_import_structure`` maps submodule names to the
# public symbols they export.  The scrambled copy bound this dict (and every
# optional-dependency list) to throwaway names, so the ``_LazyModule`` call at
# the bottom raised a NameError on ``_import_structure``.
_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module
    # below resolves them on first attribute access.
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 533 |
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
lowerCAmelCase : List[Any] = {"""UserAgent""": UserAgent().random}
def extract_user_profile(script) -> dict:
    """Pull the user dict out of an Instagram ``window._sharedData`` script tag.

    *script* is a parsed <script> tag whose first content item is raw
    JavaScript text; the JSON object starting at ``{"config"`` runs to the
    last character before the trailing ``;``.
    """
    data = script.contents[0]
    info = json.loads(data[data.find("""{\"config\"""") : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


# The scrambled original published this helper as ``a__``; keep that name
# alive for any external caller.
a__ = extract_user_profile
class InstagramUser:
    """Scrape public profile information for one Instagram *username*.

    All data comes from the JSON blob embedded in the profile page's
    <script> tags; a single GET request is made in ``__init__``.
    """

    def __init__(self, username: str):
        self.url = f'https://www.instagram.com/{username}/'
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Fetch the profile page and return the embedded user dict."""
        # NOTE(review): the scrambled copy read an undefined ``_a`` for the
        # headers; a fresh random User-Agent per request matches its intent.
        html = requests.get(self.url, headers={"""UserAgent""": UserAgent().random}).text
        scripts = BeautifulSoup(html, """html.parser""").find_all("""script""")
        # The payload usually sits in the 5th script tag; fall back to the 4th.
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}(\'{self.username}\')'

    def __str__(self) -> str:
        return f'{self.fullname} ({self.username}) is {self.biography}'

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


# Preserve the scrambled original's class name for external references.
__magic_name__ = InstagramUser
def test_instagram(username: str = "github") -> None:
    """Smoke-test the scraper against a well-known public account.

    Skipped on CI, where Instagram blocks anonymous scraping.
    """
    import os

    if os.environ.get("""CI"""):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    # Lower bounds only: live counters grow over time.
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 1_50
    assert instagram_user.number_of_followers > 12_00_00
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("""https://instagram.""")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # The scrambled copy bound the scraper to a throwaway name and then
    # printed an undefined ``instagram_user``.
    instagram_user = InstagramUser("""github""")
    print(instagram_user)
    print(f"""{instagram_user.number_of_posts = }""")
    print(f"""{instagram_user.number_of_followers = }""")
    print(f"""{instagram_user.number_of_followings = }""")
    print(f"""{instagram_user.email = }""")
    print(f"""{instagram_user.website = }""")
    print(f"""{instagram_user.profile_picture_url = }""")
    print(f"""{instagram_user.is_verified = }""")
    print(f"""{instagram_user.is_private = }""")
| 533 | 1 |
import sys
import warnings
from os.path import abspath, dirname, join

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# The scrambled copy bound the path to a throwaway name, leaving
# ``git_repo_path`` undefined on the next line.
git_repo_path = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    """Pytest hook: register diffusers' shared command-line options.

    The scrambled copy named this ``_UpperCamelCase`` (so pytest never called
    it) and passed an undefined name to the shared helper.
    """
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """Pytest hook: emit the ``--make-reports`` summary files when requested."""
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('''--make-reports''')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 696 |
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        """Mirror of the Win32 CONSOLE_CURSOR_INFO structure."""

        # ctypes reads the field spec from the magic attribute ``_fields_``;
        # the scrambled copy stored it under a throwaway name, and named the
        # class so that the ``CursorInfo()`` call sites below failed.
        _fields_ = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)]
def hide_cursor() -> None:
    """Hide the terminal cursor (Win32 console API on Windows, ANSI elsewhere)."""
    if os.name == "nt":
        ci = CursorInfo()
        # -11 is STD_OUTPUT_HANDLE; scrambled copy called the nonexistent
        # ``kernelaa`` DLL and discarded the visibility flag.
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write('\033[?25l')
        sys.stdout.flush()


# Keep the scrambled original's public name working.
lowercase = hide_cursor
def show_cursor() -> None:
    """Restore the terminal cursor (inverse of ``hide_cursor``)."""
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)  # STD_OUTPUT_HANDLE
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write('\033[?25h')
        sys.stdout.flush()


# Keep the scrambled original's public name working.
lowercase = show_cursor
@contextmanager
def lowercase():
    """Context manager that hides the terminal cursor for the duration of the
    ``with`` block and always restores it on exit.

    NOTE(review): relies on module-level ``hide_cursor``/``show_cursor``
    helpers; in this scrambled copy those names are not defined under those
    spellings — confirm the sibling definitions are restored.
    """
    try:
        hide_cursor()
        yield
    finally:
        # ``finally`` guarantees the cursor comes back even if the body raises.
        show_cursor()
| 247 | 0 |
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Return the leftmost index where *item* can be inserted into
    *sorted_collection* while keeping it sorted.

    ``hi < 0`` means "search to the end of the list".
    """
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Return the rightmost index where *item* can be inserted (after any
    equal elements) while keeping *sorted_collection* sorted."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert *item* before any existing equal elements."""
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert *item* after any existing equal elements."""
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    """Iterative binary search; return an index of *item* or None."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    """Same contract as :func:`binary_search`, delegating to ``bisect``."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    """Recursive binary search over ``sorted_collection[left:right + 1]``."""
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input('''Enter numbers separated by comma:\n''').strip()
    collection = sorted(int(item) for item in user_input.split(''','''))
    target = int(input('''Enter a single number to be found in the list:\n'''))
    result = binary_search(collection, target)
    if result is None:
        print(f'''{target} was not found in {collection}.''')
    else:
        print(f'''{target} was found at position {result} in {collection}.''')
| 701 |
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    """Shared assertions: the fixture table has 4 rows, 3 known columns, and
    the expected dtypes.  (The scrambled copy had duplicate parameter names,
    which is a SyntaxError.)"""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Reading a SQL table must work with and without keep_in_memory.

    NOTE(review): fixture names reconstructed from the scrambled signature
    (four positional fixtures) — confirm against the project conftest.
    """
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            """dataset""", """sqlite:///""" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
    """features""",
    [
        None,
        {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
        {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
        {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
        {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Explicit ``features`` must override the inferred schema (None falls
    back to the defaults).  Fixture names reconstructed — see note above
    ``test_dataset_from_sql_keep_in_memory``."""
    cache_dir = tmp_path / """cache"""
    default_expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("""dataset""", """sqlite:///""" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    """Yield every row of the ``dataset`` table in the SQLite db at *sqlite_path*."""
    import sqlite3  # stdlib; the module-level ``import sqlitea`` is a scrambled typo

    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("""SELECT * FROM dataset""")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Round-trip: write the dataset back to SQLite (single process) and
    compare rows with the source table."""
    cache_dir = tmp_path / """cache"""
    output_sqlite_path = os.path.join(cache_dir, """tmp.sql""")
    dataset = SqlDatasetReader("""dataset""", """sqlite:///""" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, """dataset""", """sqlite:///""" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Same round-trip as ``test_dataset_to_sql`` but with two writer processes."""
    cache_dir = tmp_path / """cache"""
    output_sqlite_path = os.path.join(cache_dir, """tmp.sql""")
    dataset = SqlDatasetReader("""dataset""", """sqlite:///""" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, """dataset""", """sqlite:///""" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """``num_proc=0`` is invalid and must raise ValueError before any write."""
    cache_dir = tmp_path / """cache"""
    output_sqlite_path = os.path.join(cache_dir, """tmp.sql""")
    dataset = SqlDatasetReader("""dataset""", """sqlite:///""" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, """dataset""", """sqlite:///""" + output_sqlite_path, num_proc=0).write()
| 590 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase : List[Any] = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'microsoft/beit-base-patch16-224-pt22k': (
'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    """Configuration for the BEiT model, including the optional semantic
    segmentation heads.  Defaults mirror microsoft/beit-base-patch16-224-pt22k.

    The scrambled copy declared ``class A__(A__)`` (a NameError) and wrote
    every ``__init__`` argument into a throwaway local instead of ``self``.
    """

    model_type = '''beit'''

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],  # mutable default kept for interface compat
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


# The scrambled source published this class as ``A__``.
A__ = BeitConfig
class BeitOnnxConfig(OnnxConfig):
    """ONNX export configuration for BEiT (single pixel_values input)."""

    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating exported outputs.
        return 1e-4


# The scrambled source also published this class as ``A__`` (shadowing the
# config class, as in the original shadowing order).
A__ = BeitOnnxConfig
| 405 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
__lowerCAmelCase : Dict ={
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    """Configuration for the CANINE character-level model.

    The scrambled copy inherited from an undefined name and repeated one
    parameter name for the entire signature (a SyntaxError); parameter names
    are reconstructed from the body's assignment order.
    """

    model_type = '''canine'''

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16_384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,  # CANINE reserves private-use codepoints for specials
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16_384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride


# The scrambled source published this class as ``_lowercase``.
_lowercase = CanineConfig
| 696 | 0 |
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()

# Module-level logger; the scrambled copy bound it to a throwaway name,
# leaving every later ``logger.info`` call a NameError.
logger = logging.get_logger(__name__)
def load_tfa_weights_in_bert(model, tf_checkpoint_path, config):
    """Copy weights from a TensorFlow 2.x BERT checkpoint into *model*.

    Only the embedding/encoder/pooler layers are handled; MLM/NSP heads and
    optimizer state are skipped.  Returns *model* with its parameters filled
    in place.  (The scrambled copy discarded every assignment into one
    throwaway name while later lines read the real variables.)
    """
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")

    # Sanity check: all variables must sit at the same nesting depth.
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads."
        )

    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers are resolved by the later "embeddings"
                    # component using layer_num:
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output dense
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm
                # NOTE(review): unreachable — this key is already matched by the
                # identical elif above; preserved as-is from the original.
                trace.append("output")
                pointer = getattr(pointer, "output")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            # TF stores dense kernels transposed relative to PyTorch Linear.
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}"
            )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model
def convert_tfa_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    """Convert a TensorFlow 2.x BERT checkpoint into a PyTorch state dict.

    Args:
        tf_checkpoint_path: Path to the TF 2.x checkpoint.
        config_path: Path to the BERT config JSON describing the architecture.
        pytorch_dump_path: Where to write the converted ``state_dict``.
    """
    # Instantiate model
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint into the freshly built model (mutates it in place).
    # NOTE(review): argument order follows the upstream conversion script
    # (model, checkpoint_path, config) — confirm against the loader's signature above.
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tfa_weights_in_bert(model, tf_checkpoint_path, config)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # CLI entry point: parse the three required paths and run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model (must include filename).",
    )
    args = parser.parse_args()
    convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 702 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure: map each submodule to the public symbols it provides
# so heavy modules are only imported on first attribute access.
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

# The modeling module requires torch; only expose it when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 592 | 0 |
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCAmelCase_ : Optional[int] = logging.getLogger(__name__)
def parse_args():
    """Parse the CLI options for preparing TFRecord shards from a text dataset.

    Returns:
        argparse.Namespace with dataset, tokenizer, sharding and output options.
    """
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    """Return a ``datasets.map``-compatible callable that tokenizes the "text" column.

    Args:
        tokenizer: Callable applied to the batch's "text" field.
    """

    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    """Serialize tokenized samples into TFRecord-ready byte strings.

    Args:
        tokenized_data: Mapping with parallel "input_ids" and "attention_mask"
            lists of integer sequences.

    Returns:
        List of serialized ``tf.train.Example`` byte strings, one per sample.
    """
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        example_features = tf.train.Features(feature=features)
        example = tf.train.Example(features=example_features)
        serialized = example.SerializeToString()
        records.append(serialized)
    return records
def main(args):
    """Tokenize, chunk, and serialize a dataset split into TFRecord shards.

    Args:
        args: Namespace from ``parse_args`` with dataset, tokenizer, sharding
            and output-directory options.
    """
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    # Record the total number of serialized examples for this split.
    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    # Script entry point: parse CLI options, then build the TFRecord shards.
    args = parse_args()
    main(args)
| 489 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    """Builds configs and dummy pixel inputs for exercising TimmBackbone in tests."""

    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        # Default to the last stage only when the caller does not pick indices.
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        """Return a config plus a random pixel-values tensor of the tester's shape."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        """Build a TimmBackboneConfig from the tester's attributes."""
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        """Run the backbone in eval mode and verify the last feature map's shape."""
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        # NOTE(review): attribute kept as `feature_map` from the original code;
        # TimmBackbone outputs are usually `feature_maps` — confirm.
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs to the (config, inputs_dict) convention."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
# Test suite for TimmBackbone: combines the model-tester, backbone-tester and
# pipeline-tester mixins with unittest.TestCase.
# NOTE(review): several names below (`lowerCamelCase`, `__lowerCAmelCase`) are
# unresolved placeholders left by source mangling — the original referenced the
# mixin classes and per-call arguments (config, torch_device, ...); confirm
# against the upstream test file before running.
class UpperCamelCase__ ( __lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,unittest.TestCase ):
    # Model classes / pipeline mapping under test (empty when torch is absent).
    lowerCAmelCase__ : str = (TimmBackbone,) if is_torch_available() else ()
    lowerCAmelCase__ : List[str] = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    # Common-test feature flags, all disabled for this backbone.
    lowerCAmelCase__ : Optional[int] = False
    lowerCAmelCase__ : Any = False
    lowerCAmelCase__ : int = False
    lowerCAmelCase__ : Tuple = False
    def __a ( self : Any ):
        '''Set up the model tester and the config tester for this suite.'''
        a__ = TimmBackboneModelTester(self )
        a__ = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase )
    def __a ( self : Optional[int] ):
        '''Exercise the standard config round-trip and initialization checks.'''
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def __a ( self : List[str] ):
        '''Compare a timm-loaded backbone against its transformers equivalent.'''
        a__ = "resnet18"
        a__ = "microsoft/resnet-18"
        a__ = AutoBackbone.from_pretrained(lowerCamelCase , use_timm_backbone=lowerCamelCase )
        a__ = AutoBackbone.from_pretrained(lowerCamelCase )
        self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
        self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
        self.assertEqual(timm_model.channels , transformers_model.channels )
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices , (-1,) )
        self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        a__ = AutoBackbone.from_pretrained(lowerCamelCase , use_timm_backbone=lowerCamelCase , out_indices=[1, 2, 3] )
        a__ = AutoBackbone.from_pretrained(lowerCamelCase , out_indices=[1, 2, 3] )
        self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
        self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
        self.assertEqual(timm_model.channels , transformers_model.channels )
    @unittest.skip("TimmBackbone doesn't support feed forward chunking" )
    def __a ( self : Tuple ):
        '''Skipped: feed-forward chunking is unsupported.'''
        pass
    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" )
    def __a ( self : Any ):
        '''Skipped: no num_hidden_layers attribute on this backbone.'''
        pass
    @unittest.skip("TimmBackbone initialization is managed on the timm side" )
    def __a ( self : int ):
        '''Skipped: weight initialization happens inside timm.'''
        pass
    @unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
    def __a ( self : int ):
        '''Skipped: vision backbone, no inputs_embeds.'''
        pass
    @unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
    def __a ( self : str ):
        '''Skipped: vision backbone, no inputs_embeds.'''
        pass
    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" )
    def __a ( self : List[Any] ):
        '''Skipped: a backbone checkpoint is mandatory.'''
        pass
    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
    def __a ( self : Optional[Any] ):
        '''Skipped: only timm checkpoints are loadable.'''
        pass
    @unittest.skip("model weights aren't tied in TimmBackbone." )
    def __a ( self : List[str] ):
        '''Skipped: no tied weights.'''
        pass
    @unittest.skip("model weights aren't tied in TimmBackbone." )
    def __a ( self : Tuple ):
        '''Skipped: no tied weights.'''
        pass
    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
    def __a ( self : Union[str, Any] ):
        '''Skipped: only timm checkpoints are loadable.'''
        pass
    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
    def __a ( self : int ):
        '''Skipped: only timm checkpoints are loadable.'''
        pass
    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." )
    def __a ( self : List[Any] ):
        '''Skipped: hidden size is not exposed by the config.'''
        pass
    @unittest.skip("TimmBackbone doesn't support output_attentions." )
    def __a ( self : Union[str, Any] ):
        '''Skipped: no attention outputs.'''
        pass
    @unittest.skip("Safetensors is not supported by timm." )
    def __a ( self : int ):
        '''Skipped: timm has no safetensors support.'''
        pass
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def __a ( self : Dict ):
        '''Skipped pending a smaller common-test model.'''
        pass
    def __a ( self : Optional[int] ):
        '''Check each model's forward signature starts with pixel_values.'''
        a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a__ = model_class(lowerCamelCase )
            a__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            a__ = [*signature.parameters.keys()]
            a__ = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , lowerCamelCase )
    def __a ( self : str ):
        '''Verify gradients flow back to hidden states (and attentions if present).'''
        a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
        a__ = True
        a__ = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        a__ = self.all_model_classes[0]
        a__ = model_class(lowerCamelCase )
        model.to(lowerCamelCase )
        a__ = self._prepare_for_class(lowerCamelCase , lowerCamelCase )
        a__ = model(**lowerCamelCase )
        a__ = outputs[0][-1]
        # Encoder-/Decoder-only models
        a__ = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            a__ = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=lowerCamelCase )
        self.assertIsNotNone(hidden_states.grad )
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad )
    def __a ( self : Any ):
        '''Check feature maps/channels for default, single-stage and fresh-weight configs.'''
        a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a__ = model_class(lowerCamelCase )
            model.to(lowerCamelCase )
            model.eval()
            a__ = model(**lowerCamelCase )
            self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
            self.assertEqual(len(model.channels ) , len(config.out_indices ) )
            # Check output of last stage is taken if out_features=None, out_indices=None
            a__ = copy.deepcopy(lowerCamelCase )
            a__ = None
            a__ = model_class(lowerCamelCase )
            model.to(lowerCamelCase )
            model.eval()
            a__ = model(**lowerCamelCase )
            self.assertEqual(len(result.feature_maps ) , 1 )
            self.assertEqual(len(model.channels ) , 1 )
            # Check backbone can be initialized with fresh weights
            a__ = copy.deepcopy(lowerCamelCase )
            a__ = False
            a__ = model_class(lowerCamelCase )
            model.to(lowerCamelCase )
            model.eval()
            a__ = model(**lowerCamelCase )
| 489 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
# Lazy import structure: each onnx submodule mapped to its public symbols.
_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}

if TYPE_CHECKING:
    # Static type checkers see the real imports; names match _import_structure.
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class A__ :
    """Shared tester for diffusers UNet sub-blocks (down/mid/up).

    Subclasses supply ``block_class`` and ``block_type`` class attributes and
    mix this in with ``unittest.TestCase``.
    """

    @property
    def dummy_input(self):
        """Default dummy inputs for the block under test."""
        return self.get_dummy_input()

    @property
    def output_shape(self):
        """Expected output tensor shape for each supported block type."""
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        """Build a deterministic input dict; optional entries per block kind."""
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            # Separate seed so residual states differ from hidden_states.
            generator_a = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_a, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        """Return (init kwargs, inputs dict) adjusted for the block type."""
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            # Up-blocks additionally need the previous stage's channel count.
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        """Run the block in eval mode and compare shape plus a corner slice."""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        """Smoke-test a training step: forward, MSE loss against noise, backward."""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
"""simple docstring"""
import math
class a__ :
    """All-pairs shortest paths via Floyd–Warshall on a dense adjacency matrix.

    Distances start at ``math.inf`` (including the diagonal); ``add_edge``
    installs direct weights and ``floyd_warshall`` relaxes through every
    intermediate node.
    """

    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        # adjacency matrix for weight
        self.w = [[math.inf for j in range(0, n)] for i in range(0, n)]
        # dp[i][j] stores minimum distance from i to j
        self.dp = [[math.inf for j in range(0, n)] for i in range(0, n)]

    def add_edge(self, u, v, w):
        """Set the direct edge weight from node u to node v."""
        self.dp[u][v] = w

    def floyd_warshall(self):
        """Relax all pairs through each intermediate node k (O(n^3))."""
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        """Return the shortest known distance from u to v."""
        return self.dp[u][v]


# Alias used by the demo code below; kept for backward compatibility.
Graph = a__
if __name__ == "__main__":
    # Demo: build a 5-node weighted digraph, run Floyd–Warshall, query two paths.
    # NOTE(review): `Graph` and `graph` are not bound under these names in this
    # module as written (the class above is `a__` and the instance is `_A`) —
    # confirm the intended bindings before running.
    _A : List[str] = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
| 361 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 332 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

# Map of pretrained checkpoint names to their hosted config files.
WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}

# Token ids that never correspond to speech (punctuation, special markers);
# suppressed during generation. English-only and multilingual variants.
# fmt: off
NON_SPEECH_TOKENS = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
    705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
    1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
    4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
    11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
    17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
    34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
    893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
    3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
    7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
    14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
    22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
    42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
# fmt: on
class __A( PretrainedConfig ):
    """Configuration for the Whisper encoder-decoder speech-recognition model.

    Stores architecture hyper-parameters (layer counts, dimensions, dropout),
    generation-related token ids, and SpecAugment fine-tuning settings.
    """

    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map the generic config attribute names onto Whisper's own names.
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class __A( OnnxSeqaSeqConfigWithPast ):
    """ONNX export configuration for Whisper (seq2seq, optional cached past)."""

    @property
    def inputs(self):
        """Named ONNX inputs with their dynamic axes."""
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            # With cached keys/values only one new decoder token is fed per step.
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size=-1,
        seq_length=-1,
        is_pair=False,
        framework=None,
        sampling_rate=22050,
        time_duration=5.0,
        frequency=220,
    ):
        """Build dummy encoder audio features and decoder ids for export tracing."""
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )

        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        # When past is used, the decoder sequence length is derived from the
        # encoder sequence (halved); otherwise the caller's seq_length stands.
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self):
        """Absolute tolerance used when validating exported model outputs."""
        return 1E-3
| 86 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
# Maps each supported architecture key (--student_type / --teacher_type) to its
# (config class, LM model class, tokenizer class) triple; indexed in main().
# Renamed from a mangled identifier: the dict is looked up as MODEL_CLASSES below.
# NOTE(review): the GPTa* names mirror the (also mangled) import block above and
# presumably stand for GPT2Config/GPT2LMHeadModel/GPT2Tokenizer — confirm.
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
    """Validate the CLI argument combinations before training starts.

    Renamed from a mangled identifier: this function is invoked as
    ``sanity_checks(args)`` in ``main``.

    Checks that the MLM/CLM loss weights are coherent with the ``--mlm`` flag,
    that the student/teacher architecture pairing is supported, that referenced
    files exist, and that at least one loss has a positive weight.

    Args:
        args: parsed ``argparse`` namespace from ``main``.

    Raises:
        AssertionError: if any combination of options is invalid.
    """
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    # Distilling across architectures is only supported bert -> distilbert.
    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    """Freeze the student's positional embeddings so they are not trained.

    Renamed from a mangled identifier: invoked as
    ``freeze_pos_embeddings(student, args)`` in ``main``.

    NOTE(review): the original assignment targets were lost to mangling
    (``... = False`` with no left-hand attribute); restored to the standard
    distillation-script behaviour of clearing ``requires_grad`` on the
    position-embedding weights — confirm against upstream.
    """
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student, args):
    """Freeze the student's token-type embeddings (RoBERTa students only).

    Renamed from a mangled identifier: invoked as
    ``freeze_token_type_embeddings(student, args)`` in ``main``.

    NOTE(review): the original assignment target was lost to mangling;
    restored to clearing ``requires_grad`` on the token-type embedding
    weights — confirm against upstream.
    """
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    """CLI entry point: parse arguments, load teacher/student/data, run the Distiller.

    NOTE(review): local variable names and several attribute-assignment targets
    in this function were lost to name mangling; they were reconstructed from
    the downstream uses visible in this file (e.g. ``student_config_class``,
    ``token_probs``, ``train_lm_seq_dataset``) — confirm against the upstream
    distillation script.
    """
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")

    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")

    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )

    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")

    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )

    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")

    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")

    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4_000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                # Original message was garbled ("precised wheter ... itUse"); fixed.
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        # Rare tokens get boosted sampling probability (XLM-style smoothing).
        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
| 86 | 1 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
# Renamed from a mangled single-letter name: the flag is referenced below as
# `is_python_no_less_than_3_10` to gate the PEP 604 test variants.
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    """Dataclass helper: a field whose default is produced by a factory returning *default*.

    Renamed from a mangled identifier: the dataclasses below use it as
    ``list_field(default=[...])``. A ``default_factory`` is required because
    mutable defaults are not allowed directly on dataclass fields.
    """
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    """Simple four-field dataclass exercised by the parser tests.

    NOTE(review): the class name and field names were mangled (all four fields
    collapsed to one identifier); restored from the ``BasicExample(**...)``
    construction and the {"foo", "bar", "baz", "flag"} keys used in the
    parse_dict/parse_json/parse_yaml tests below.
    """

    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class __a :
    # NOTE(review): mangled — the real class name was lost and both field
    # names collapsed to one identifier, so only the last annotation survives
    # as an actual dataclass field. The parser test below expects an int field
    # defaulting to 42 and a str field defaulting to "toto" carrying the
    # "help message" help text; restore the names before use.
    __UpperCamelCase : int = 42
    __UpperCamelCase : str = field(default='toto', metadata={'help': 'help message'} )
@dataclass
class WithDefaultBoolExample:
    """Boolean fields with defaults; drives the --foo/--baz/--no_baz/--opt parser test.

    NOTE(review): class and field names restored from the
    ``dataclass_types = [WithDefaultBoolExample]`` line and the
    ``Namespace(foo=..., baz=..., opt=...)`` assertions in the test below.
    """

    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum):
    """Two-member string enum used by the choice-argument tests.

    NOTE(review): the class name was restored from the ``BasicEnum(...)``
    conversions and annotations below; the base class was mangled but is
    presumably ``Enum`` (imported at the top of this chunk).
    """

    titi = "titi"
    toto = "toto"
class MixedTypeEnum(Enum):
    """Enum mixing str and int values; exercises make_choice_type_function.

    NOTE(review): member names restored from the ``MixedTypeEnum.titi`` /
    ``.toto`` / ``.fourtytwo`` accesses and the ["titi", "toto", 42] choice
    lists in the tests below.
    """

    titi = "titi"
    toto = "toto"
    fourtytwo = 42
@dataclass
class __a :
    # NOTE(review): mangled enum-example dataclass. The field is read back as
    # ``self.foo`` in the method below, and the method re-wrapping it in
    # BasicEnum was presumably ``__post_init__``; as written, the converted
    # value is discarded into a dead local — restore names before use.
    __UpperCamelCase : BasicEnum = "toto"

    def UpperCAmelCase_ (self : int ):
        """Coerce the string field into a ``BasicEnum`` member (result currently discarded)."""
        __SCREAMING_SNAKE_CASE = BasicEnum(self.foo )
@dataclass
class __a :
    # NOTE(review): mangled; mirrors the class above but wraps the field in
    # MixedTypeEnum instead of BasicEnum — same name restoration needed.
    __UpperCamelCase : MixedTypeEnum = "toto"

    def UpperCAmelCase_ (self : Dict ):
        """Coerce the string field into a ``MixedTypeEnum`` member (result currently discarded)."""
        __SCREAMING_SNAKE_CASE = MixedTypeEnum(self.foo )
@dataclass
class OptionalExample:
    """All-optional fields; drives the --foo/--bar/--baz/--ces/--des parser test.

    NOTE(review): class and field names restored from the
    ``dataclass_types = [OptionalExample]`` line and the
    ``Namespace(foo=..., bar=..., baz=..., ces=..., des=...)`` assertions in
    the test below.
    """

    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])
@dataclass
class ListExample:
    """List-typed fields; drives the nargs="+" parser test.

    NOTE(review): the class name is a best guess (it is never referenced by
    name in this chunk); the field names are restored from the expected
    --foo_int/--bar_int/--foo_str/--foo_float options asserted below.
    """

    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class RequiredExample:
    """Fields without defaults become required CLI options.

    NOTE(review): field names restored from the expected --required_list /
    --required_str / --required_enum options and the ``self.required_enum``
    access below; the coercion method was presumably ``__post_init__`` and its
    assignment target was lost to mangling — confirm against upstream.
    """

    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        # Accept the raw string argparse delivers and wrap it in the enum.
        self.required_enum = BasicEnum(self.required_enum)
@dataclass
class StringLiteralAnnotationExample:
    """Fields annotated with string literals (forward references).

    NOTE(review): the class name is a best guess; field names are restored
    from the expected --foo/--required_enum/--opt/--baz/--foo_str options
    asserted in the string-literal-annotation test below.
    """

    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])
if is_python_no_less_than_3_10:
    # PEP 604 variants (`bool | None`, `int | None`, ...) of the examples
    # above; only defined when the interpreter supports the `|` union syntax.
    # NOTE(review): both class names and all field names were mangled (the
    # fields of each class collapse onto a single identifier); they mirror
    # the with-default-bool and optional examples respectively, and
    # `_snake_case` in the field default below is a mangling artifact
    # (presumably None) — restore before use.
    @dataclass
    class __a :
        __UpperCamelCase : bool = False
        __UpperCamelCase : bool = True
        __UpperCamelCase : bool | None = None

    @dataclass
    class __a :
        __UpperCamelCase : int | None = None
        __UpperCamelCase : float | None = field(default=_snake_case, metadata={'help': 'help message'} )
        __UpperCamelCase : str | None = None
        __UpperCamelCase : list[str] | None = list_field(default=[] )
        __UpperCamelCase : list[int] | None = list_field(default=[] )
class __a ( unittest.TestCase ):
    """Unit tests for HfArgumentParser.

    NOTE(review): every test method in this class was mangled to the same
    identifier ``UpperCAmelCase__`` — later defs shadow earlier ones, so only
    the final method survives at runtime and unittest discovers none of the
    originals. Method bodies also reference names (``a``, ``b``, ``xx``,
    ``yy``, ``parser``, ``expected``, ``args``, ``example``, ``enum_ex``,
    ``dataclass_types``, ``temp_local_path``, ...) whose binding statements
    were mangled into write-only placeholders (``__SCREAMING_SNAKE_CASE =``),
    and ``self.argparsersEqual`` is never defined under that name. The
    original per-test names and locals must be restored before this suite can
    run; the docstrings below record each test's intent.
    """

    def UpperCAmelCase__ ( self : List[str] ,lowerCamelCase : argparse.ArgumentParser ,lowerCamelCase : argparse.ArgumentParser ):
        """Helper (originally ``argparsersEqual``): assert two parsers define equivalent actions."""
        self.assertEqual(len(a._actions ) ,len(b._actions ) )
        for x, y in zip(a._actions ,b._actions ):
            __SCREAMING_SNAKE_CASE = {k: v for k, v in vars(lowerCamelCase ).items() if k != """container"""}
            __SCREAMING_SNAKE_CASE = {k: v for k, v in vars(lowerCamelCase ).items() if k != """container"""}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("""choices""" ,lowerCamelCase ) and yy.get("""choices""" ,lowerCamelCase ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["""type"""](lowerCamelCase ) ,yy["""type"""](lowerCamelCase ) )
                del xx["type"], yy["type"]
            self.assertEqual(lowerCamelCase ,lowerCamelCase )

    def UpperCAmelCase__ ( self : int ):
        """Required int/float/str fields plus an optional boolean --flag."""
        __SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase )
        __SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
        expected.add_argument("""--foo""" ,type=lowerCamelCase ,required=lowerCamelCase )
        expected.add_argument("""--bar""" ,type=lowerCamelCase ,required=lowerCamelCase )
        expected.add_argument("""--baz""" ,type=lowerCamelCase ,required=lowerCamelCase )
        expected.add_argument("""--flag""" ,type=lowerCamelCase ,default=lowerCamelCase ,const=lowerCamelCase ,nargs="""?""" )
        self.argparsersEqual(lowerCamelCase ,lowerCamelCase )
        __SCREAMING_SNAKE_CASE = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""]
        ((__SCREAMING_SNAKE_CASE) , ) = parser.parse_args_into_dataclasses(lowerCamelCase ,look_for_args_file=lowerCamelCase )
        self.assertFalse(example.flag )

    def UpperCAmelCase__ ( self : str ):
        """Fields with defaults become optional CLI arguments."""
        __SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase )
        __SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
        expected.add_argument("""--foo""" ,default=42 ,type=lowerCamelCase )
        expected.add_argument("""--baz""" ,default="""toto""" ,type=lowerCamelCase ,help="""help message""" )
        self.argparsersEqual(lowerCamelCase ,lowerCamelCase )

    def UpperCAmelCase__ ( self : Optional[Any] ):
        """Boolean fields: --foo/--baz/--no_baz/--opt handling, incl. PEP 604 variant."""
        __SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
        expected.add_argument("""--foo""" ,type=lowerCamelCase ,default=lowerCamelCase ,const=lowerCamelCase ,nargs="""?""" )
        expected.add_argument("""--baz""" ,type=lowerCamelCase ,default=lowerCamelCase ,const=lowerCamelCase ,nargs="""?""" )
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("""--no_baz""" ,action="""store_false""" ,default=lowerCamelCase ,dest="""baz""" )
        expected.add_argument("""--opt""" ,type=lowerCamelCase ,default=lowerCamelCase )
        __SCREAMING_SNAKE_CASE = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(lowerCamelCase )
        for dataclass_type in dataclass_types:
            __SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase )
            self.argparsersEqual(lowerCamelCase ,lowerCamelCase )
            __SCREAMING_SNAKE_CASE = parser.parse_args([] )
            self.assertEqual(lowerCamelCase ,Namespace(foo=lowerCamelCase ,baz=lowerCamelCase ,opt=lowerCamelCase ) )
            __SCREAMING_SNAKE_CASE = parser.parse_args(["""--foo""", """--no_baz"""] )
            self.assertEqual(lowerCamelCase ,Namespace(foo=lowerCamelCase ,baz=lowerCamelCase ,opt=lowerCamelCase ) )
            __SCREAMING_SNAKE_CASE = parser.parse_args(["""--foo""", """--baz"""] )
            self.assertEqual(lowerCamelCase ,Namespace(foo=lowerCamelCase ,baz=lowerCamelCase ,opt=lowerCamelCase ) )
            __SCREAMING_SNAKE_CASE = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
            self.assertEqual(lowerCamelCase ,Namespace(foo=lowerCamelCase ,baz=lowerCamelCase ,opt=lowerCamelCase ) )
            __SCREAMING_SNAKE_CASE = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
            self.assertEqual(lowerCamelCase ,Namespace(foo=lowerCamelCase ,baz=lowerCamelCase ,opt=lowerCamelCase ) )

    def UpperCAmelCase__ ( self : List[str] ):
        """Enum-typed field: mixed-type choices and value coercion to enum members."""
        __SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase )
        __SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
        expected.add_argument(
            """--foo""" ,default="""toto""" ,choices=["""titi""", """toto""", 42] ,type=make_choice_type_function(["""titi""", """toto""", 42] ) ,)
        self.argparsersEqual(lowerCamelCase ,lowerCamelCase )
        __SCREAMING_SNAKE_CASE = parser.parse_args([] )
        self.assertEqual(args.foo ,"""toto""" )
        __SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses([] )[0]
        self.assertEqual(enum_ex.foo ,MixedTypeEnum.toto )
        __SCREAMING_SNAKE_CASE = parser.parse_args(["""--foo""", """titi"""] )
        self.assertEqual(args.foo ,"""titi""" )
        __SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0]
        self.assertEqual(enum_ex.foo ,MixedTypeEnum.titi )
        __SCREAMING_SNAKE_CASE = parser.parse_args(["""--foo""", """42"""] )
        self.assertEqual(args.foo ,42 )
        __SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0]
        self.assertEqual(enum_ex.foo ,MixedTypeEnum.fourtytwo )

    def UpperCAmelCase__ ( self : Any ):
        """Literal-typed field behaves like an enum of its listed values."""
        @dataclass
        class __a :
            __UpperCamelCase : Literal["titi", "toto", 42] = "toto"

        __SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase )
        __SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
        expected.add_argument(
            """--foo""" ,default="""toto""" ,choices=("""titi""", """toto""", 42) ,type=make_choice_type_function(["""titi""", """toto""", 42] ) ,)
        self.argparsersEqual(lowerCamelCase ,lowerCamelCase )
        __SCREAMING_SNAKE_CASE = parser.parse_args([] )
        self.assertEqual(args.foo ,"""toto""" )
        __SCREAMING_SNAKE_CASE = parser.parse_args(["""--foo""", """titi"""] )
        self.assertEqual(args.foo ,"""titi""" )
        __SCREAMING_SNAKE_CASE = parser.parse_args(["""--foo""", """42"""] )
        self.assertEqual(args.foo ,42 )

    def UpperCAmelCase__ ( self : Optional[int] ):
        """List-typed fields map to nargs="+" arguments with list defaults."""
        __SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase )
        __SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
        expected.add_argument("""--foo_int""" ,nargs="""+""" ,default=[] ,type=lowerCamelCase )
        expected.add_argument("""--bar_int""" ,nargs="""+""" ,default=[1, 2, 3] ,type=lowerCamelCase )
        expected.add_argument("""--foo_str""" ,nargs="""+""" ,default=["""Hallo""", """Bonjour""", """Hello"""] ,type=lowerCamelCase )
        expected.add_argument("""--foo_float""" ,nargs="""+""" ,default=[0.1, 0.2, 0.3] ,type=lowerCamelCase )
        self.argparsersEqual(lowerCamelCase ,lowerCamelCase )
        __SCREAMING_SNAKE_CASE = parser.parse_args([] )
        self.assertEqual(
            lowerCamelCase ,Namespace(foo_int=[] ,bar_int=[1, 2, 3] ,foo_str=["""Hallo""", """Bonjour""", """Hello"""] ,foo_float=[0.1, 0.2, 0.3] ) ,)
        __SCREAMING_SNAKE_CASE = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
        self.assertEqual(lowerCamelCase ,Namespace(foo_int=[1] ,bar_int=[2, 3] ,foo_str=["""a""", """b""", """c"""] ,foo_float=[0.1, 0.7] ) )

    def UpperCAmelCase__ ( self : str ):
        """Optional fields default to None; includes the PEP 604 variant."""
        __SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
        expected.add_argument("""--foo""" ,default=lowerCamelCase ,type=lowerCamelCase )
        expected.add_argument("""--bar""" ,default=lowerCamelCase ,type=lowerCamelCase ,help="""help message""" )
        expected.add_argument("""--baz""" ,default=lowerCamelCase ,type=lowerCamelCase )
        expected.add_argument("""--ces""" ,nargs="""+""" ,default=[] ,type=lowerCamelCase )
        expected.add_argument("""--des""" ,nargs="""+""" ,default=[] ,type=lowerCamelCase )
        __SCREAMING_SNAKE_CASE = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(lowerCamelCase )
        for dataclass_type in dataclass_types:
            __SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase )
            self.argparsersEqual(lowerCamelCase ,lowerCamelCase )
            __SCREAMING_SNAKE_CASE = parser.parse_args([] )
            self.assertEqual(lowerCamelCase ,Namespace(foo=lowerCamelCase ,bar=lowerCamelCase ,baz=lowerCamelCase ,ces=[] ,des=[] ) )
            __SCREAMING_SNAKE_CASE = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
            self.assertEqual(lowerCamelCase ,Namespace(foo=12 ,bar=3.14 ,baz="""42""" ,ces=["""a""", """b""", """c"""] ,des=[1, 2, 3] ) )

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """Fields without defaults become required arguments."""
        __SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase )
        __SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
        expected.add_argument("""--required_list""" ,nargs="""+""" ,type=lowerCamelCase ,required=lowerCamelCase )
        expected.add_argument("""--required_str""" ,type=lowerCamelCase ,required=lowerCamelCase )
        expected.add_argument(
            """--required_enum""" ,type=make_choice_type_function(["""titi""", """toto"""] ) ,choices=["""titi""", """toto"""] ,required=lowerCamelCase ,)
        self.argparsersEqual(lowerCamelCase ,lowerCamelCase )

    def UpperCAmelCase__ ( self : Optional[int] ):
        """String-literal (forward-reference) annotations parse the same way."""
        __SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase )
        __SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
        expected.add_argument("""--foo""" ,type=lowerCamelCase ,required=lowerCamelCase )
        expected.add_argument(
            """--required_enum""" ,type=make_choice_type_function(["""titi""", """toto"""] ) ,choices=["""titi""", """toto"""] ,required=lowerCamelCase ,)
        expected.add_argument("""--opt""" ,type=lowerCamelCase ,default=lowerCamelCase )
        expected.add_argument("""--baz""" ,default="""toto""" ,type=lowerCamelCase ,help="""help message""" )
        expected.add_argument("""--foo_str""" ,nargs="""+""" ,default=["""Hallo""", """Bonjour""", """Hello"""] ,type=lowerCamelCase )
        self.argparsersEqual(lowerCamelCase ,lowerCamelCase )

    def UpperCAmelCase__ ( self : Dict ):
        """parse_dict builds a dataclass instance from a plain dict."""
        __SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase )
        __SCREAMING_SNAKE_CASE = {
            """foo""": 12,
            """bar""": 3.14,
            """baz""": """42""",
            """flag""": True,
        }
        __SCREAMING_SNAKE_CASE = parser.parse_dict(lowerCamelCase )[0]
        __SCREAMING_SNAKE_CASE = BasicExample(**lowerCamelCase )
        self.assertEqual(lowerCamelCase ,lowerCamelCase )

    def UpperCAmelCase__ ( self : Any ):
        """parse_dict rejects unknown keys unless allow_extra_keys is set."""
        __SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase )
        __SCREAMING_SNAKE_CASE = {
            """foo""": 12,
            """bar""": 3.14,
            """baz""": """42""",
            """flag""": True,
            """extra""": 42,
        }
        self.assertRaises(lowerCamelCase ,parser.parse_dict ,lowerCamelCase ,allow_extra_keys=lowerCamelCase )

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """Round-trip a config through a JSON file on disk."""
        # NOTE(review): this calls parse_yaml_file on a .json path — likely a
        # mangling artifact; the original presumably used parse_json_file.
        __SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase )
        __SCREAMING_SNAKE_CASE = {
            """foo""": 12,
            """bar""": 3.14,
            """baz""": """42""",
            """flag""": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            __SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase ,"""temp_json""" )
            os.mkdir(lowerCamelCase )
            with open(temp_local_path + """.json""" ,"""w+""" ) as f:
                json.dump(lowerCamelCase ,lowerCamelCase )
            __SCREAMING_SNAKE_CASE = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0]
            __SCREAMING_SNAKE_CASE = BasicExample(**lowerCamelCase )
            self.assertEqual(lowerCamelCase ,lowerCamelCase )

    def UpperCAmelCase__ ( self : List[Any] ):
        """Round-trip a config through a YAML file on disk."""
        __SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase )
        __SCREAMING_SNAKE_CASE = {
            """foo""": 12,
            """bar""": 3.14,
            """baz""": """42""",
            """flag""": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            __SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase ,"""temp_yaml""" )
            os.mkdir(lowerCamelCase )
            with open(temp_local_path + """.yaml""" ,"""w+""" ) as f:
                yaml.dump(lowerCamelCase ,lowerCamelCase )
            __SCREAMING_SNAKE_CASE = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
            __SCREAMING_SNAKE_CASE = BasicExample(**lowerCamelCase )
            self.assertEqual(lowerCamelCase ,lowerCamelCase )

    def UpperCAmelCase__ ( self : Tuple ):
        """HfArgumentParser can be constructed from TrainingArguments."""
        __SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase )
        self.assertIsNotNone(lowerCamelCase )
| 109 |
from __future__ import annotations
import requests
# Post fields reddit's listing JSON exposes per child; used to validate the
# caller-supplied `wanted_data` list. Renamed from a mangled identifier: the
# function below reads it as `valid_terms`.
valid_terms = set(
    """approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)


def get_subreddit_data(subreddit, limit=1, age="new", wanted_data=None):
    """Fetch posts from a subreddit via reddit's public JSON endpoint.

    The original mangled signature duplicated one parameter name four times
    (a SyntaxError); names restored from the f-string placeholders in the body
    and the keyword argument used at the call site below.

    Args:
        subreddit: subreddit name, without the ``r/`` prefix.
        limit: number of posts to fetch.
        age: listing to query ("new", "top", "hot", ...).
        wanted_data: optional list of per-post fields to extract; must be a
            subset of ``valid_terms``. When empty, raw post payloads are
            returned.

    Returns:
        dict mapping post index to the requested fields (or the raw payload).

    Raises:
        ValueError: if ``wanted_data`` contains an unknown field.
        requests.HTTPError: when reddit answers 429 (rate limited).
    """
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict


if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited.Try after some time
    print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
| 87 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import machinery: heavy submodules are only imported on first attribute
# access. NOTE(review): the mangled assignments here originally all built and
# extended ``_import_structure`` (the name passed to _LazyModule at the bottom
# of this chunk); the conditional keys were restored from the submodule names
# imported in the TYPE_CHECKING branch — confirm against upstream.
_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: skip the modeling/feature-extraction entries so importing
    # the package still succeeds.
    pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_clap import (
        CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
        ClapAudioConfig,
        ClapConfig,
        ClapTextConfig,
    )
    from .processing_clap import ClapProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clap import ClapFeatureExtractor
        from .modeling_clap import (
            CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ClapAudioModel,
            ClapAudioModelWithProjection,
            ClapModel,
            ClapPreTrainedModel,
            ClapTextModel,
            ClapTextModelWithProjection,
        )
else:
    import sys

    # Replace this module with a lazy proxy that resolves names on demand.
    # NOTE(review): the mangled original discarded the proxy into a dead name;
    # restored to the standard sys.modules replacement — confirm.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 701 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    """Task template mapping a dataset's columns onto (text, labels).

    NOTE(review): the class name, base class, method names and local names
    were mangled in this file; they were reconstructed from the attribute
    accesses in the method bodies (``self.label_column``, ``self.label_schema``,
    ``return task_template``, ...) and the TaskTemplate import above — confirm
    against upstream ``datasets``.
    """

    # `task` identifies the template; kept in asdict() output even at default.
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the dataset's
        actual ClassLabel feature for ``self.label_column``.

        Raises:
            ValueError: if the label column is missing or is not a ClassLabel.
        """
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so write through __dict__ instead of setattr.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        # Maps the dataset's column names to the template's canonical names.
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
| 608 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class FlaxUNetaDConditionOutput(BaseOutput):
    """Output of the Flax UNet2D conditional model.

    Attributes:
        sample: the predicted (denoised) sample, shape ``(batch, channels, height, width)``.
    """

    sample: jnp.ndarray
@flax_register_to_config
class _snake_case(nn.Module, FlaxModelMixin, ConfigMixin):
    r"""Conditional 2D UNet in Flax.

    Takes a noisy `sample`, a diffusion `timestep` and `encoder_hidden_states`
    (e.g. text embeddings) and predicts the denoised sample. Field defaults
    mirror the Stable Diffusion UNet configuration.
    """

    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False

    def init_weights(self, rng) -> FrozenDict:
        """Initialize the model parameters from dummy inputs."""
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]

    def setup(self):
        """Build conv-in, time embedding, down/mid/up blocks and conv-out."""
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        # broadcast scalar settings to one entry per down block
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype)

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlockaD(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            else:
                up_block = FlaxUpBlockaD(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype)

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1E-5)
        self.conv_out = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)

    def __call__(self, sample, timesteps, encoder_hidden_states, down_block_additional_residuals=None, mid_block_additional_residual=None, return_dict=True, train=False):
        """Run the UNet.

        Returns a ``FlaxUNetaDConditionOutput`` (or a 1-tuple when
        ``return_dict`` is False) holding the predicted sample in NCHW layout.
        """
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process (NCHW -> NHWC for flax convs)
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlockaD):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # optional ControlNet-style residuals added to the skip connections
        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up — each up block consumes layers_per_block+1 skip tensors
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1):]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlockaD):
                sample = up_block(
                    sample, temb=t_emb, encoder_hidden_states=encoder_hidden_states, res_hidden_states_tuple=res_samples, deterministic=not train)
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process (back to NCHW)
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)
        return FlaxUNetaDConditionOutput(sample=sample)
import datasets
__a : Optional[Any] = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
__a : Optional[Any] = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
__a : str = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def UpperCAmelCase(preds, labels):
    """Simple accuracy: fraction of positions where `preds` equals `labels`.

    Expects array-likes supporting elementwise ``==`` and ``.mean()``
    (e.g. numpy arrays), matching the metric's ``format="numpy"``.
    """
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _UpperCamelCase(datasets.Metric):
    """XNLI metric: plain classification accuracy over predicted vs. reference labels."""

    def _info(self) -> datasets.MetricInfo:
        """Metric metadata: citation, description and the expected feature types."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # sts-b is a regression config, hence float labels there.
                    '''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32'''),
                    '''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32'''),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format='''numpy''',
        )

    def _compute(self, predictions, references) -> Dict:
        """Return {"accuracy": ...} using the module-level accuracy helper."""
        return {"accuracy": UpperCAmelCase(predictions, references)}
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    """Builds tiny MaskFormerSwin configs/inputs and shared checks for the test classes below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        # NOTE: mutable defaults are shared across instances; acceptable here
        # because the tester never mutates them.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        """Random pixel inputs (and labels, when enabled) plus a fresh config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            # NOTE(review): `path_norm` (not `patch_norm`) is kept from the
            # original source — looks like an upstream typo; confirm.
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Forward through MaskFormerSwinModel and check the hidden-state shape."""
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        """Forward through MaskFormerSwinBackbone and check feature maps/channels."""
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError on an unsupported out_features entry
        with self.parent.assertRaises(ValueError):
            config.out_features = ['''stem''']
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( _UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
    """Model tests for MaskFormerSwin (plain model + backbone wrapper).

    NOTE(review): this file has been through an identifier-mangling pass.
    The base classes are spelled `_UpperCAmelCase` (self-referential;
    presumably ModelTesterMixin and PipelineTesterMixin imported above),
    many call sites reference the undefined names `A_` and
    `MaskFormerSwinModelTester`, several method signatures repeat the
    parameter name `lowerCamelCase` (a SyntaxError), and repeated
    `lowercase__ = ...` class attributes rebind a single name. Confirm
    against the upstream transformers test file before trusting behavior.
    """
    # all_model_classes-style tuple: both the model and the backbone are exercised.
    lowercase__ = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    # pipeline mapping for the feature-extraction pipeline test.
    lowercase__ = {'''feature-extraction''': MaskFormerSwinModel} if is_torch_available() else {}
    # NOTE(review): five distinct boolean test flags collapsed onto one name.
    lowercase__ = False
    lowercase__ = False
    lowercase__ = False
    lowercase__ = False
    lowercase__ = False
    # setUp: builds the shared model tester and a ConfigTester.
    def lowercase__ ( self : int ):
        '''simple docstring'''
        lowercase__ = MaskFormerSwinModelTester(self )
        lowercase__ = ConfigTester(self, config_class=A_, embed_dim=37 )
    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            '''`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'''
            ''' `nn.DataParallel`'''
        ) )
    def lowercase__ ( self : Any ):
        '''simple docstring'''
        pass
    # Runs the full battery of ConfigTester checks.
    def lowercase__ ( self : str ):
        '''simple docstring'''
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def lowercase__ ( self : int ):
        '''simple docstring'''
        return
    # Delegates the model shape check to the tester.
    def lowercase__ ( self : Tuple ):
        '''simple docstring'''
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A_ )
    # Delegates the backbone check to the tester.
    def lowercase__ ( self : List[str] ):
        '''simple docstring'''
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*A_ )
    @unittest.skip('''Swin does not use inputs_embeds''' )
    def lowercase__ ( self : List[str] ):
        '''simple docstring'''
        pass
    @unittest.skip('''Swin does not support feedforward chunking''' )
    def lowercase__ ( self : str ):
        '''simple docstring'''
        pass
    # Checks input/output embedding accessors on every model class.
    def lowercase__ ( self : List[str] ):
        '''simple docstring'''
        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase__ = model_class(A_ )
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
            lowercase__ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(A_, nn.Linear ) )
    # Checks that forward() takes `pixel_values` as its first argument.
    def lowercase__ ( self : Union[str, Any] ):
        '''simple docstring'''
        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase__ = model_class(A_ )
            lowercase__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase__ = [*signature.parameters.keys()]
            lowercase__ = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], A_ )
    @unittest.skip(reason='''MaskFormerSwin is only used as backbone and doesn\'t support output_attentions''' )
    def lowercase__ ( self : Tuple ):
        '''simple docstring'''
        pass
    @unittest.skip(reason='''MaskFormerSwin is only used as an internal backbone''' )
    def lowercase__ ( self : Any ):
        '''simple docstring'''
        pass
    # Shared helper: runs a model with no_grad and checks hidden-state count and shape.
    def lowercase__ ( self : Dict, lowerCamelCase : List[Any], lowerCamelCase : Tuple, lowerCamelCase : List[Any], lowerCamelCase : Union[str, Any] ):
        '''simple docstring'''
        lowercase__ = model_class(A_ )
        model.to(A_ )
        model.eval()
        with torch.no_grad():
            lowercase__ = model(**self._prepare_for_class(A_, A_ ) )
        lowercase__ = outputs.hidden_states
        lowercase__ = getattr(
            self.model_tester, '''expected_num_hidden_layers''', len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(A_ ), A_ )
        # Swin has a different seq_length
        lowercase__ = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        lowercase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ), [num_patches, self.model_tester.embed_dim], )
    # Hidden-states test at the natural image size (via kwarg and via config).
    def lowercase__ ( self : Dict ):
        '''simple docstring'''
        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase__ = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            lowercase__ = True
            self.check_hidden_states_output(A_, A_, A_, A_ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowercase__ = True
            self.check_hidden_states_output(A_, A_, A_, A_ )
    # Hidden-states test with the image padded up to a multiple of patch_size.
    def lowercase__ ( self : Any ):
        '''simple docstring'''
        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase__ = 3
        lowercase__ = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        lowercase__ = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        lowercase__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        lowercase__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            lowercase__ = True
            self.check_hidden_states_output(A_, A_, A_, (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowercase__ = True
            self.check_hidden_states_output(A_, A_, A_, (padded_height, padded_width) )
    @unittest.skip(reason='''MaskFormerSwin doesn\'t have pretrained checkpoints''' )
    def lowercase__ ( self : Optional[Any] ):
        '''simple docstring'''
        pass
    @unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
    def lowercase__ ( self : Tuple ):
        '''simple docstring'''
        pass
    @unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
    def lowercase__ ( self : List[str] ):
        '''simple docstring'''
        pass
    # Checks tuple vs. dict model outputs are elementwise equal (NaNs zeroed).
    def lowercase__ ( self : Optional[Any] ):
        '''simple docstring'''
        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(lowerCamelCase : List[str] ):
            lowercase__ = 0
            return t
        def check_equivalence(lowerCamelCase : Union[str, Any], lowerCamelCase : List[Any], lowerCamelCase : Optional[int], lowerCamelCase : List[str]={} ):
            with torch.no_grad():
                lowercase__ = model(**A_, return_dict=A_, **A_ )
                lowercase__ = model(**A_, return_dict=A_, **A_ ).to_tuple()
            def recursive_check(lowerCamelCase : List[Any], lowerCamelCase : List[str] ):
                if isinstance(A_, (List, Tuple) ):
                    for tuple_iterable_value, dict_iterable_value in zip(A_, A_ ):
                        recursive_check(A_, A_ )
                elif isinstance(A_, A_ ):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values() ):
                        recursive_check(A_, A_ )
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(A_ ), set_nan_tensor_to_zero(A_ ), atol=1E-5 ), msg=(
                            '''Tuple and dict output are not equal. Difference:'''
                            F""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
                            F""" {torch.isnan(A_ ).any()} and `inf`: {torch.isinf(A_ )}. Dict has"""
                            F""" `nan`: {torch.isnan(A_ ).any()} and `inf`: {torch.isinf(A_ )}."""
                        ), )
            recursive_check(A_, A_ )
        for model_class in self.all_model_classes:
            lowercase__ = model_class(A_ )
            model.to(A_ )
            model.eval()
            lowercase__ = self._prepare_for_class(A_, A_ )
            lowercase__ = self._prepare_for_class(A_, A_ )
            check_equivalence(A_, A_, A_ )
            lowercase__ = self._prepare_for_class(A_, A_, return_labels=A_ )
            lowercase__ = self._prepare_for_class(A_, A_, return_labels=A_ )
            check_equivalence(A_, A_, A_ )
            lowercase__ = self._prepare_for_class(A_, A_ )
            lowercase__ = self._prepare_for_class(A_, A_ )
            check_equivalence(A_, A_, A_, {'''output_hidden_states''': True} )
            lowercase__ = self._prepare_for_class(A_, A_, return_labels=A_ )
            lowercase__ = self._prepare_for_class(A_, A_, return_labels=A_ )
            check_equivalence(A_, A_, A_, {'''output_hidden_states''': True} )
@require_torch
class _UpperCAmelCase ( unittest.TestCase ,_UpperCAmelCase ):
    """Backbone-specific checks for MaskFormerSwinBackbone.

    NOTE(review): identifier mangling — the mixin base is spelled
    `_UpperCAmelCase` (self-referential; presumably BackboneTesterMixin
    imported above) and `A_`/`MaskFormerSwinModelTester` below are undefined
    names left by the obfuscation. Confirm against the upstream transformers
    test file before trusting behavior.
    """
    # NOTE(review): two distinct class attributes collapsed onto one name.
    lowercase__ = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    lowercase__ = MaskFormerSwinConfig
    # setUp: builds the shared model tester.
    def lowercase__ ( self : Optional[Any] ):
        '''simple docstring'''
        lowercase__ = MaskFormerSwinModelTester(self )
    # Runs each backbone class and inspects feature maps / hidden states / attentions.
    def lowercase__ ( self : Optional[int] ):
        '''simple docstring'''
        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase__ = inputs_dict['''pixel_values'''].shape[0]
        for backbone_class in self.all_model_classes:
            lowercase__ = backbone_class(A_ )
            backbone.to(A_ )
            backbone.eval()
            lowercase__ = backbone(**A_ )
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, A_ )
            self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels ):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels) )
            self.assertIsNone(outputs.hidden_states )
            self.assertIsNone(outputs.attentions )
            # Test output_hidden_states=True
            lowercase__ = backbone(**A_, output_hidden_states=A_ )
            self.assertIsNotNone(outputs.hidden_states )
            self.assertTrue(len(outputs.hidden_states ), len(backbone.stage_names ) )
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels ):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    lowercase__ , lowercase__ , lowercase__ = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels) )
            # Test output_attentions=True
            if self.has_attentions:
                lowercase__ = backbone(**A_, output_attentions=A_ )
                self.assertIsNotNone(outputs.attentions )
| 705 |
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
if exponent == 1:
return base
if exponent % 2 == 0:
lowercase__ = _modexpt(lowerCamelCase_ , exponent // 2 , lowerCamelCase_ ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(lowerCamelCase_ , exponent - 1 , lowerCamelCase_ )) % modulo_value
def a ( lowerCamelCase_ = 1777 , lowerCamelCase_ = 1855 , lowerCamelCase_ = 8 ):
'''simple docstring'''
lowercase__ = base
for _ in range(1 , lowerCamelCase_ ):
lowercase__ = _modexpt(lowerCamelCase_ , lowerCamelCase_ , 10**digits )
return result
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 0 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

# Filenames / download URLs / max input sizes consumed by the tokenizer class below.
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''',
        '''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/esm2_t6_8M_UR50D''': 1024,
    '''facebook/esm2_t12_35M_UR50D''': 1024,
}
def load_vocab_file(vocab_file):
    """Read a vocab file and return its lines, stripped of surrounding whitespace."""
    with open(vocab_file, 'r') as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class __A(PreTrainedTokenizer):
    """ESM protein-sequence tokenizer: whitespace tokenization over a fixed vocab file."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>", eos_token="<eos>", **kwargs):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        # Every vocab token is atomic: never split by added-token handling.
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        # ESM sequences are plain residue strings; split on whitespace only.
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        """Add <cls> ... <eos> framing; pairs get an <eos> after each segment."""
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_a_a is None:
            if self.eos_token_id is None:
                return cls + token_ids_a
            else:
                return cls + token_ids_a + sep
        elif self.eos_token_id is None:
            raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!')
        return cls + token_ids_a + sep + token_ids_a_a + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(self, token_ids_a, token_ids_a_a=None, already_has_special_tokens: bool = False):
        """1 for special tokens, 0 for sequence tokens."""
        if already_has_special_tokens:
            if token_ids_a_a is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.')
            return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
        mask = [1] + ([0] * len(token_ids_a)) + [1]
        if token_ids_a_a is not None:
            mask += [0] * len(token_ids_a_a) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt')
        with open(vocab_file, 'w') as f:
            f.write('\n'.join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens: bool = False):
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
| 343 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCamelCase ( ProcessorMixin ):
    r"""
    Wraps a ViLT image processor and a BERT tokenizer into a single processor
    for joint image+text inputs.

    Fixes applied: the base class was the undefined name ``A__`` (must be
    ``ProcessorMixin``, imported at the top of this file); the three class
    attributes all shared the name ``a_`` so two were shadowed; the five methods
    after ``__call__`` all shared the name ``lowerCamelCase`` so only the last
    survived; parameter/local names collapsed to placeholders left
    ``feature_extractor``/``encoding`` undefined.

    Args:
        image_processor: the ViLT image processor (required).
        tokenizer: the BERT tokenizer, slow or fast (required).
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        # Backward compatibility: `feature_extractor` was renamed to `image_processor`.
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor

    def __call__( self , images , text=None , add_special_tokens=True , padding=False , truncation=None , max_length=None , stride=0 , pad_to_multiple_of=None , return_token_type_ids=None , return_attention_mask=None , return_overflowing_tokens=False , return_special_tokens_mask=False , return_offsets_mapping=False , return_length=False , verbose=True , return_tensors=None , **kwargs , ):
        """Tokenize `text` and preprocess `images`; returns one encoding with both."""
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        encoding.update(encoding_image_processor )
        return encoding

    def batch_decode( self , *args , **kwargs ):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        # Union of both components' input names, order-preserving and de-duplicated.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class( self ):
        """Deprecated alias kept for backward compatibility."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        """Deprecated alias kept for backward compatibility."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
| 610 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase_ : List[Any] = {
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    r"""
    Configuration for an EfficientNet model; the defaults describe a b7-like
    architecture.

    Fixes applied: the base class referenced the class's own not-yet-defined
    name (a NameError at import) instead of ``PretrainedConfig``; all ``__init__``
    parameters shared one placeholder name (a SyntaxError); the ``self.*``
    assignments had collapsed into a single local so no attribute was stored;
    the ``model_type`` attribute name was lost.
    """

    model_type = '''efficientnet'''

    def __init__( self , num_channels : int = 3 , image_size : int = 600 , width_coefficient : float = 2.0 , depth_coefficient : float = 3.1 , depth_divisor : int = 8 , kernel_sizes : List[int] = [3, 3, 5, 3, 5, 5, 3] , in_channels : List[int] = [32, 16, 24, 40, 80, 112, 192] , out_channels : List[int] = [16, 24, 40, 80, 112, 192, 320] , depthwise_padding : List[int] = [] , strides : List[int] = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats : List[int] = [1, 2, 2, 3, 3, 4, 1] , expand_ratios : List[int] = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio : float = 0.2_5 , hidden_act : str = "swish" , hidden_dim : int = 2_560 , pooling_type : str = "mean" , initializer_range : float = 0.0_2 , batch_norm_eps : float = 0.0_0_1 , batch_norm_momentum : float = 0.9_9 , dropout_rate : float = 0.5 , drop_connect_rate : float = 0.2 , **kwargs , ) ->Optional[int]:
        '''Store all architecture hyper-parameters on the instance.'''
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        # Each block repeat expands into 4 hidden layers.
        self.num_hidden_layers = sum(num_block_repeats ) * 4
class EfficientNetOnnxConfig ( OnnxConfig ):
    r"""
    ONNX export configuration for EfficientNet.

    Fixes applied: this class reused the config class's name (shadowing it) and
    inherited from it instead of ``OnnxConfig``; the attribute/property names
    had collapsed to placeholders, hiding ``inputs``/``atol_for_validation``.
    """

    # Minimum torch version whose ONNX exporter supports this model.
    torch_onnx_minimum_version = version.parse('''1.11''' )

    @property
    def inputs( self ) ->Mapping[str, Mapping[int, str]]:
        '''Dynamic-axis layout of the exported model's inputs.'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation( self ) ->float:
        '''Absolute tolerance used when validating the ONNX export.'''
        return 1e-5
| 718 | '''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase_ : List[str] = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    r"""
    Configuration for a YOLOS object-detection model.

    Fixes applied: the base class referenced the class's own not-yet-defined
    name (a NameError at import) instead of ``PretrainedConfig``; all ``__init__``
    parameters shared one placeholder name (a SyntaxError); the ``self.*``
    assignments had collapsed into a single local; the ``model_type`` attribute
    name was lost.
    """

    model_type = '''yolos'''

    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , image_size=[512, 864] , patch_size=16 , num_channels=3 , qkv_bias=True , num_detection_tokens=100 , use_mid_position_embeddings=True , auxiliary_loss=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ) ->Tuple:
        '''Store the ViT-backbone, detection-head, and loss hyper-parameters.'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig ( OnnxConfig ):
    r"""
    ONNX export configuration for YOLOS.

    Fixes applied: this class reused the config class's name (shadowing it) and
    inherited from it instead of ``OnnxConfig``; the attribute/property names
    had collapsed to placeholders.
    """

    # Minimum torch version whose ONNX exporter supports this model.
    torch_onnx_minimum_version = version.parse('''1.11''' )

    @property
    def inputs( self ) ->Mapping[str, Mapping[int, str]]:
        '''Dynamic-axis layout of the exported model's inputs.'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation( self ) ->float:
        '''Absolute tolerance used when validating the ONNX export.'''
        return 1e-4

    @property
    def default_onnx_opset( self ) ->int:
        '''Default ONNX opset version used for the export.'''
        return 12
| 204 | 0 |
"""simple docstring"""
def is_pangram( input_str: str = "The quick brown fox jumps over the lazy dog", ) -> bool:
    """Return True if `input_str` contains every letter of the English alphabet.

    Bug fix: the original's locals had collapsed into one placeholder name, so
    `frequency` and `input_str` were undefined, and all four functions in this
    module shared one name (shadowing each other); the canonical names used by
    `benchmark()` and the main guard are restored.
    """
    frequency = set()
    input_str = input_str.replace(" ", "" )  # spaces are not letters
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower() )
    return len(frequency ) == 26
def is_pangram_faster( input_str: str = "The quick brown fox jumps over the lazy dog", ) -> bool:
    """Pangram check using a fixed 26-slot flag list, one pass, no set.

    Bug fix: the flag-list index assignments had collapsed into a bare local
    rebind; the lower/upper branches are restored to set the per-letter slot.
    NOTE(review): assumes ASCII letters only — non-ASCII alphabetic characters
    would index out of range.
    """
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char ) - ord("a" )] = True
        elif char.isupper():
            flag[ord(char ) - ord("A" )] = True
    return all(flag )
def is_pangram_fastest( input_str: str = "The quick brown fox jumps over the lazy dog", ) -> bool:
    """Pangram check via a set comprehension over the lower-cased alphabetic chars.

    Bug fix: the body read `input_str` but the parameter had a placeholder name;
    the function also shared its name with its siblings.
    """
    return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def benchmark() -> None:
    """Time the three pangram implementations against each other.

    Bug fix: the setup string was assigned to a placeholder and then referenced
    by an undefined name; the function is named `benchmark` as the main guard
    expects.
    """
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup ) )
    print(timeit("is_pangram_faster()", setup=setup ) )
    print(timeit("is_pangram_fastest()", setup=setup ) )
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
# Script entry point: run the module doctests, then the timing benchmark.
# NOTE(review): requires module-level names `is_pangram*` and `benchmark`.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    benchmark()
| 104 |
"""simple docstring"""
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class __UpperCamelCase :
    """Test-harness helper that builds tiny CTRL configs and synthetic inputs.

    NOTE(review): this class is currently broken by name mangling and is left
    code-identical here: every ``__init__`` parameter is named
    ``lowerCAmelCase__`` (duplicate argument names are a SyntaxError), every
    assignment targets the single local ``a`` while later code reads the
    intended names (``parent``, ``batch_size``, ``config_and_inputs`` …), and
    all methods share the name ``__a`` so only the last definition survives.
    The original descriptive names must be restored before this can run.
    """

    # NOTE(review): duplicate parameter names below are a SyntaxError.
    def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=14 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=99 , lowerCAmelCase__=32 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=512 , lowerCAmelCase__=16 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , ) -> Optional[int]:
        # NOTE(review): the right-hand-side names below are never bound.
        a : Optional[int] = parent
        a : Optional[int] = batch_size
        a : str = seq_length
        a : int = is_training
        a : Union[str, Any] = use_token_type_ids
        a : Tuple = use_input_mask
        a : Optional[Any] = use_labels
        a : str = use_mc_token_ids
        a : str = vocab_size
        a : str = hidden_size
        a : List[Any] = num_hidden_layers
        a : str = num_attention_heads
        a : Tuple = intermediate_size
        a : List[Any] = hidden_act
        a : List[Any] = hidden_dropout_prob
        a : List[str] = attention_probs_dropout_prob
        a : Dict = max_position_embeddings
        a : int = type_vocab_size
        a : Any = type_sequence_label_size
        a : List[Any] = initializer_range
        a : List[str] = num_labels
        a : Optional[int] = num_choices
        a : int = scope
        a : Tuple = self.vocab_size - 1

    # Build config + random input tensors for one test invocation.
    def __a ( self ) -> int:
        a : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        a : str = None
        if self.use_input_mask:
            a : Dict = random_attention_mask([self.batch_size, self.seq_length] )
        a : Tuple = None
        if self.use_token_type_ids:
            a : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        a : int = None
        if self.use_mc_token_ids:
            a : Dict = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
        a : List[Any] = None
        a : Optional[int] = None
        a : str = None
        if self.use_labels:
            a : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            a : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            a : Any = ids_tensor([self.batch_size] , self.num_choices )
        a : Tuple = self.get_config()
        a : int = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    # Tiny CTRL configuration for fast tests.
    def __a ( self ) -> Tuple:
        return CTRLConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )

    # Forward-pass shape checks for the base CTRL model.
    def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , *lowerCAmelCase__ ) -> str:
        a : Union[str, Any] = CTRLModel(config=lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , head_mask=lowerCAmelCase__ )
        model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
        a : List[Any] = model(lowerCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )

    # Loss/logits shape checks for the LM-head model.
    def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , *lowerCAmelCase__ ) -> Optional[int]:
        a : Tuple = CTRLLMHeadModel(lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        a : List[str] = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    # NOTE(review): the annotated tuple-unpacking below is invalid syntax and
    # the elements should unpack into the nine named inputs.
    def __a ( self ) -> Optional[int]:
        a : Tuple = self.prepare_config_and_inputs()
        (
            (
                a
            ), (
                a
            ), (
                a
            ), (
                a
            ), (
                a
            ), (
                a
            ), (
                a
            ), (
                a
            ), (
                a
            ),
        ) : Optional[Any] = config_and_inputs
        a : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
        return config, inputs_dict

    # Logits shape checks for the sequence-classification model.
    def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , *lowerCAmelCase__ ) -> Optional[Any]:
        a : List[str] = self.num_labels
        a : Union[str, Any] = CTRLForSequenceClassification(lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        a : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        a : Dict = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
# NOTE(review): the bases `a__` are undefined — presumably the ModelTesterMixin /
# GenerationTesterMixin / PipelineTesterMixin imported above; confirm and restore.
class __UpperCamelCase ( a__ , a__ , a__ , unittest.TestCase ):
    """Common-test suite wiring for the CTRL model family.

    NOTE(review): all methods share the name ``__a`` (only the last survives),
    method parameters reuse one placeholder name (duplicate-argument
    SyntaxError), and ``self.model_tester``/``self.config_tester`` are never
    assigned because the setUp assignments target the bare local ``a``.
    """

    # Model classes / pipeline mapping exercised by the shared test suite.
    lowerCamelCase : Tuple =(CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    lowerCamelCase : List[Any] =(CTRLLMHeadModel,) if is_torch_available() else ()
    lowerCamelCase : List[Any] =(
        {
            """feature-extraction""": CTRLModel,
            """text-classification""": CTRLForSequenceClassification,
            """text-generation""": CTRLLMHeadModel,
            """zero-shot""": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    lowerCamelCase : Optional[int] =True
    lowerCamelCase : List[str] =False
    lowerCamelCase : Optional[Any] =False

    # Skip predicate for pipeline tests that cannot run with this config.
    def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True
        return False

    def __a ( self ) -> str:
        a : str = CTRLModelTester(self )
        a : Dict = ConfigTester(self , config_class=lowerCAmelCase__ , n_embd=37 )

    def __a ( self ) -> Tuple:
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def __a ( self ) -> Optional[Any]:
        self.config_tester.run_common_tests()

    def __a ( self ) -> Union[str, Any]:
        a : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*lowerCAmelCase__ )

    def __a ( self ) -> int:
        a : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*lowerCAmelCase__ )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def __a ( self ) -> Tuple:
        pass

    @slow
    def __a ( self ) -> Optional[Any]:
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a : Union[str, Any] = CTRLModel.from_pretrained(lowerCAmelCase__ )
            self.assertIsNotNone(lowerCAmelCase__ )

    @unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
    def __a ( self ) -> Tuple:
        pass
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
    """Slow integration test generating text with the pretrained CTRL checkpoint.

    NOTE(review): both methods share the name ``__a`` (the tearDown is shadowed)
    and every local is assigned to the bare name ``a`` while later lines read
    ``model``/``output_ids``/the expected-id list via the undefined placeholder
    ``lowerCAmelCase__``; the descriptive names must be restored to run.
    """

    def __a ( self ) -> int:
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    # Greedy-generation regression test against a fixed expected id sequence.
    @slow
    def __a ( self ) -> Dict:
        a : Dict = CTRLLMHeadModel.from_pretrained("ctrl" )
        model.to(lowerCAmelCase__ )
        a : List[str] = torch.tensor(
            [[1_1859, 0, 1611, 8]] , dtype=torch.long , device=lowerCAmelCase__ ) # Legal the president is
        a : Any = [
            1_1859,
            0,
            1611,
            8,
            5,
            150,
            2_6449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            2_0740,
            24_6533,
            24_6533,
            19,
            30,
            5,
        ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        a : str = model.generate(lowerCAmelCase__ , do_sample=lowerCAmelCase__ )
        self.assertListEqual(output_ids[0].tolist() , lowerCAmelCase__ )
| 633 | 0 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
# Find the fork point with `main`, list the files changed since then, and print
# the .py files living under the sub-directories given on the command line.
# Bug fix: every result was assigned to the same placeholder `_a` while later
# lines read `fork_point_sha`, `modified_files`, `joined_dirs`, `regex` and
# `relevant_modified_files` — all undefined; the real names are restored.
fork_point_sha = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
modified_files = subprocess.check_output(f"""git diff --name-only {fork_point_sha}""".split()).decode("""utf-8""").split()
joined_dirs = """|""".join(sys.argv[1:])
regex = re.compile(Rf"""^({joined_dirs}).*?\.py$""")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
# No trailing newline: this output is consumed directly by Makefile commands.
print(""" """.join(relevant_modified_files), end="""""")
| 710 |
import re
def split_input(str_: str):
    """Split `str_` on punctuation, then split each fragment on whitespace.

    Bug fix: the body read the undefined name `str_` and the def was named the
    same as every sibling (shadowing); the canonical name `split_input`, which
    the other functions in this module call, is restored.
    """
    return [char.split() for char in re.split(R"[^ a-z A-Z 0-9 \s]" , str_ )]
def to_simple_case(str_: str):
    """Concatenate every word of `str_` capitalized (PascalCase, no separators).

    Bug fix: locals had collapsed to placeholders (`string_split` undefined)
    and the def name collided with its siblings; the canonical name used by
    `to_pascal_case`/`to_camel_case` is restored.
    """
    string_split = split_input(str_ )
    return "".join(
        ["".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def to_complex_case(text: str, upper: bool, separator: str):
    """Join the words of `text` with `separator`, upper- or lower-casing them.

    Bug fix: the three parameters shared one placeholder name and the result
    variable `res_str` was never bound; the canonical name is restored so
    `to_snake_case`/`to_kebab_case` can call it.

    Returns 'not valid string' when the split yields nothing usable.
    """
    try:
        string_split = split_input(text )
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str] )
                    for sub_str in string_split
                ] )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str] )
                    for sub_str in string_split
                ] )
        return res_str
    except IndexError:
        return "not valid string"
def to_pascal_case(text: str):
    """Convert `text` to PascalCase (alias for `to_simple_case`).

    Bug fix: restores the canonical name; the def previously collided with its
    siblings and called a name that did not exist.
    """
    return to_simple_case(text )
def to_camel_case(text: str):
    """Convert `text` to camelCase; 'not valid string' on empty input.

    Bug fix: the intermediate result was never bound (`res_str` undefined) and
    the def name collided with its siblings; canonical names restored.
    """
    try:
        res_str = to_simple_case(text )
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"
def to_snake_case(text: str, upper: bool):
    """Convert `text` to snake_case (SCREAMING_SNAKE_CASE when `upper` is True).

    Bug fix: duplicate placeholder parameter names and a call to the undefined
    sibling are repaired by restoring the canonical names.
    """
    return to_complex_case(text , upper , "_" )
def to_kebab_case(text: str, upper: bool):
    """Convert `text` to kebab-case (UPPER-KEBAB-CASE when `upper` is True).

    Bug fix: duplicate placeholder parameter names and a call to the undefined
    sibling are repaired by restoring the canonical names.
    """
    return to_complex_case(text , upper , "-" )
# Script entry point: run the module's doctests.
if __name__ == "__main__":
    __import__("""doctest""").testmod()
| 111 | 0 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
# One-shot conversion script: verbose logging is fine here.
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# (parlai substring, HF substring) rename pairs, applied in order to each key.
# Bug fix: both module constants were assigned to the placeholder `__A`, so the
# `PATTERNS` name read by rename_state_dict_key() was undefined.
PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]
def rename_state_dict_key(k: str) -> str:
    """Map a ParlAI Blenderbot state-dict key to its HF equivalent.

    Bug fix: the parameter had a placeholder name while the body read `k`, and
    every `k.replace(...)` result was discarded into a placeholder local; the
    rewrites are restored so they actually accumulate in `k`.
    """
    if k == "embeddings.weight":
        return "shared.weight"
    # Apply the generic substring renames first.
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    # Then the side-specific layer-norm / attention renames.
    if k.startswith("""encoder"""):
        k = k.replace(""".attn""", """.self_attn""")
        k = k.replace("""norm1""", """self_attn_layer_norm""")
        k = k.replace("""norm2""", """final_layer_norm""")
    elif k.startswith("""decoder"""):
        k = k.replace("""norm1""", """self_attn_layer_norm""")
        k = k.replace("""norm2""", """encoder_attn_layer_norm""")
        k = k.replace("""norm3""", """final_layer_norm""")
    return k
def rename_layernorm_keys(sd) -> None:
    """Rename, in place, the four `layernorm_embedding` entries of `sd` to `layer_norm`.

    Bug fix: the key list and the popped values were assigned to placeholder
    names the body never read (`keys`, `v`, `new_k` were undefined); the real
    names are restored so the in-place rename works.
    """
    keys = [
        """model.encoder.layernorm_embedding.weight""",
        """model.encoder.layernorm_embedding.bias""",
        """model.decoder.layernorm_embedding.weight""",
        """model.decoder.layernorm_embedding.bias""",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("""layernorm_embedding""", """layer_norm""")
        assert new_k not in sd  # never clobber an existing entry
        sd[new_k] = v
# State-dict keys skipped during conversion.
# Bug fix: was assigned to the placeholder `__A`, leaving the `IGNORE_KEYS`
# name read by convert_parlai_checkpoint() undefined.
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Convert a ParlAI Blenderbot checkpoint to an HF model and save it.

    Bug fix: every intermediate was assigned to the placeholder `__UpperCAmelCase`
    while later lines read the real names, and call arguments were the undefined
    `A_`; the working-variable names are restored.

    Args:
        checkpoint_path: path to the ParlAI `.bin` checkpoint.
        pytorch_dump_folder_path: output directory for the converted model.
        config_json_path: path to the HF Blenderbot config JSON.
    """
    model = torch.load(checkpoint_path, map_location="""cpu""")
    sd = model["""model"""]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    # NOTE(review): `strict` flag restored as True — confirm against the
    # upstream conversion script.
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
# CLI entry point.
# Bug fix: the parser and parsed args were both assigned to the placeholder
# `__A`, so `parser.add_argument(...)` and `args` were undefined.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 68 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

# Map from checkpoint name to its hosted config URL.
# Bug fix: both constants were assigned to `_snake_case`, so the archive map
# silently clobbered the logger; distinct names restored
# (NOTE(review): map name chosen per the transformers convention — confirm).
CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/convnextv2-tiny-1k-224': 'https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json',
}
class lowerCAmelCase_ ( BackboneConfigMixin , PretrainedConfig ):
    """Configuration for a ConvNeXt V2 model / backbone.

    Fixes applied: both base classes were the undefined placeholder
    ``_lowercase`` (restored to the mixin + ``PretrainedConfig`` imported at the
    top of this file); all ``__init__`` parameters shared one placeholder name
    (a SyntaxError); the ``self.*`` assignments had collapsed into one local;
    the ``model_type`` attribute name was lost.
    """

    model_type = "convnextv2"

    def __init__( self , num_channels=3 , patch_size=4 , num_stages=4 , hidden_sizes=None , depths=None , hidden_act="gelu" , initializer_range=0.0_2 , layer_norm_eps=1e-1_2 , drop_path_rate=0.0 , image_size=224 , out_features=None , out_indices=None , **kwargs , ) -> Union[str, Any]:
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        # Tiny-variant defaults when sizes/depths are not provided.
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ['stem'] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
| 383 | 0 |
# Imports
import numpy as np
class UpperCamelCase__ :
def __init__( self : Tuple , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : str=None ):
'''simple docstring'''
self.set_matricies(red=UpperCamelCase__ , green=UpperCamelCase__ , blue=UpperCamelCase__ , red_edge=UpperCamelCase__ , nir=UpperCamelCase__ )
def UpperCAmelCase__ ( self : Dict , UpperCamelCase__ : Dict=None , UpperCamelCase__ : int=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : int=None ):
'''simple docstring'''
if red is not None:
lowercase_ = red
if green is not None:
lowercase_ = green
if blue is not None:
lowercase_ = blue
if red_edge is not None:
lowercase_ = red_edge
if nir is not None:
lowercase_ = nir
return True
def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : List[Any]="" , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : str=None , UpperCamelCase__ : Any=None ):
'''simple docstring'''
self.set_matricies(red=UpperCamelCase__ , green=UpperCamelCase__ , blue=UpperCamelCase__ , red_edge=UpperCamelCase__ , nir=UpperCamelCase__ )
lowercase_ = {
"""ARVI2""": self.arvaa,
"""CCCI""": self.ccci,
"""CVI""": self.cvi,
"""GLI""": self.gli,
"""NDVI""": self.ndvi,
"""BNDVI""": self.bndvi,
"""redEdgeNDVI""": self.red_edge_ndvi,
"""GNDVI""": self.gndvi,
"""GBNDVI""": self.gbndvi,
"""GRNDVI""": self.grndvi,
"""RBNDVI""": self.rbndvi,
"""PNDVI""": self.pndvi,
"""ATSAVI""": self.atsavi,
"""BWDRVI""": self.bwdrvi,
"""CIgreen""": self.ci_green,
"""CIrededge""": self.ci_rededge,
"""CI""": self.ci,
"""CTVI""": self.ctvi,
"""GDVI""": self.gdvi,
"""EVI""": self.evi,
"""GEMI""": self.gemi,
"""GOSAVI""": self.gosavi,
"""GSAVI""": self.gsavi,
"""Hue""": self.hue,
"""IVI""": self.ivi,
"""IPVI""": self.ipvi,
"""I""": self.i,
"""RVI""": self.rvi,
"""MRVI""": self.mrvi,
"""MSAVI""": self.m_savi,
"""NormG""": self.norm_g,
"""NormNIR""": self.norm_nir,
"""NormR""": self.norm_r,
"""NGRDI""": self.ngrdi,
"""RI""": self.ri,
"""S""": self.s,
"""IF""": self._if,
"""DVI""": self.dvi,
"""TVI""": self.tvi,
"""NDRE""": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("""Index not in the list!""" )
return False
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
return self.nir * (self.red / (self.green**2))
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return (self.nir - self.red) / (self.nir + self.red)
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
return (self.nir - self.blue) / (self.nir + self.blue)
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
return (self.redEdge - self.red) / (self.redEdge + self.red)
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green)
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def UpperCAmelCase__ ( self : List[str] , UpperCamelCase__ : Union[str, Any]=0.08 , UpperCamelCase__ : Optional[int]=1.22 , UpperCamelCase__ : Union[str, Any]=0.03 ):
'''simple docstring'''
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return (self.nir / self.green) - 1
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return (self.nir / self.redEdge) - 1
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
return (self.red - self.blue) / self.red
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
return self.nir - self.green
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : Optional[Any]=0.16 ):
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green + y)
def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : Any=0.5 ):
'''simple docstring'''
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : Dict=None , UpperCamelCase__ : int=None ):
'''simple docstring'''
return (self.nir - b) / (a * self.red)
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
return (self.red + self.green + self.blue) / 30.5
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
return self.nir / self.red
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return (self.rvi() - 1) / (self.rvi() + 1)
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
return self.green / (self.nir + self.red + self.green)
def UpperCAmelCase__ ( self : List[str] ):
    """Normalized NIR share: NIR / (NIR + red + green)."""
    return self.nir / (self.nir + self.red + self.green)
def UpperCAmelCase__ ( self : Union[str, Any] ):
    """Normalized red share: red / (NIR + red + green)."""
    return self.red / (self.nir + self.red + self.green)
def UpperCAmelCase__ ( self : List[Any] ):
    """Normalized Green-Red Difference Index: (green - red) / (green + red)."""
    return (self.green - self.red) / (self.green + self.red)
def UpperCAmelCase__ ( self : Dict ):
    """Redness-style index: (red - green) / (red + green) — negation of the NGRDI above."""
    return (self.red - self.green) / (self.red + self.green)
def UpperCAmelCase__(self):
    """HSV-style saturation over the RGB bands: (max - min) / max.

    Fix: both extrema were assigned to the same scrambled name (``lowercase_``)
    while the return expression reads ``max_value``/``min_value`` (NameError).
    """
    max_value = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
    min_value = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
    return (max_value - min_value) / max_value
def UpperCAmelCase__ ( self : Union[str, Any] ):
    """Hue-proxy ratio (2R - G - B) / (G - B); undefined when green == blue — caller must avoid that case."""
    return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def UpperCAmelCase__ ( self : int ):
    """Simple Ratio: NIR / red (duplicates the RVI method above — presumably both names existed originally)."""
    return self.nir / self.red
def UpperCAmelCase__ ( self : List[Any] ):
    """Transformed VI-style value: sqrt(NDVI + 0.5) — relies on the class's ndvi() method."""
    return (self.ndvi() + 0.5) ** (1 / 2)
def UpperCAmelCase__ ( self : List[str] ):
    """Normalized Difference Red-Edge: (NIR - redEdge) / (NIR + redEdge)."""
    return (self.nir - self.redEdge) / (self.nir + self.redEdge)
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
# Generic element type for the cache; the class below refers to it as ``T``,
# so the scrambled name ``a`` raised NameError at class-creation time.
T = TypeVar("T")
class LRUCache(Generic[T]):
    """Fixed-capacity least-recently-used cache.

    Recency order is kept in a deque (most recent at the left); a set gives
    O(1) membership tests. Fixes over the scrambled original: class, attribute
    and method names restored (the ``__main__`` driver and ``__repr__`` force
    ``LRUCache``/``refer``/``display``/``dq_store``/``key_reference``/
    ``_MAX_CAPACITY``), instance state is actually assigned in ``__init__``,
    and eviction removes the *popped* key from ``key_reference`` rather than
    the incoming one.
    """

    dq_store: deque[T]  # cache store of keys, MRU at the left end
    key_reference: set[T]  # references of the keys in cache
    _MAX_CAPACITY: int = 10  # maximum capacity of cache (class-wide)

    def __init__(self, n: int) -> None:
        """Create a cache of capacity ``n`` (falsy ``n`` means effectively unbounded)."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Record an access to ``x``, evicting the least-recently-used key if full."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            # Already cached: move it to the front.
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print cached keys from most to least recently used."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
a = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
# Universal gas constant R in J/(mol*K); the function below reads this name,
# so the scrambled assignment target raised NameError.
UNIVERSAL_GAS_CONSTANT = 8.314_4598
def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return the root-mean-square speed v_rms = sqrt(3*R*T / M) of a gas molecule.

    :param temperature: absolute temperature in kelvin (must be >= 0)
    :param molar_mass: molar mass (must be > 0; units determine the result's units)
    :raises Exception: on a negative temperature or non-positive molar mass
        (kept as plain Exception to preserve the original contract).

    Fix: function and parameter names were scrambled; the ``__main__`` driver
    calls ``rms_speed_of_molecule`` and the body reads named parameters.
    """
    if temperature < 0:
        raise Exception('Temperature cannot be less than 0 K')
    if molar_mass <= 0:
        raise Exception('Molar mass cannot be less than or equal to 0 kg/mol')
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
lowerCamelCase : Tuple = 300
lowerCamelCase : str = 28
lowerCamelCase : List[Any] = rms_speed_of_molecule(temperature, molar_mass)
print(F"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
from heapq import heappop, heappush
import numpy as np
def __lowercase(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    """Dijkstra shortest path on a 0/1 grid (1 = walkable), unit edge weights.

    Returns ``(distance, path)`` where ``path`` runs from ``source`` to
    ``destination`` inclusive, or ``(inf, [])`` when unreachable.

    Fix: every distinct local was scrambled to ``__lowercase``; names restored
    from the expressions that read them (``rows``/``cols``, ``dx``/``dy``,
    ``queue``/``visited``, ``matrix``, ``predecessors``, loop variables).
    """
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    # Tentative distances; only the source starts finite.
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
# NOTE(review): every parameter shares the scrambled name __SCREAMING_SNAKE_CASE —
# a SyntaxError (duplicate argument). This looks like mechanical mangling of the
# original IGF example's generate_n_pairs(context_len, max_steps,
# size_objective_set, min_len, trim, data_file, igf_data_file); likewise the
# repeated _SCREAMING_SNAKE_CASE assignments shadow each other, so the later
# positional uses no longer reference the intended values. Restore distinct
# names before running.
def lowerCamelCase_(__SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=1_026 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="data/tokenized_stories_train_wikitext103.jbl" , __SCREAMING_SNAKE_CASE="igf_context_pairs.jbl" , )-> Union[str, Any]:
    """Collect (context, information-gain) pairs for the IGF secondary learner and save them to disk."""
    set_seed(3 )
    # generate train_data and objective_set
    _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = generate_datasets(
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , number=__SCREAMING_SNAKE_CASE , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE )
    # keeps model same across runs
    set_seed(4 )
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    _SCREAMING_SNAKE_CASE : Dict = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
    # load pretrained model
    _SCREAMING_SNAKE_CASE : Any = load_gpta("""gpt2""" ).to(__SCREAMING_SNAKE_CASE )
    print("""computing perplexity on objective set""" )
    _SCREAMING_SNAKE_CASE : Union[str, Any] = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).item()
    print("""perplexity on objective set:""" , __SCREAMING_SNAKE_CASE )
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
# NOTE(review): parameter names are mangled to a single duplicated placeholder
# (SyntaxError); original signature was presumably (secondary_learner_train_data,
# secondary_learner_max_epochs, secondary_learner_batch_size, eval_freq,
# igf_model_path) — restore before running.
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=15 , __SCREAMING_SNAKE_CASE=128 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE="igf_model.pt" , )-> Optional[int]:
    """Train the IGF secondary learner on collected (context, IG) pairs and return it."""
    set_seed(42 )
    # Load pre-trained model
    _SCREAMING_SNAKE_CASE : Any = GPTaLMHeadModel.from_pretrained("""gpt2""" )
    # Initialize secondary learner to use embedding weights of model
    _SCREAMING_SNAKE_CASE : Union[str, Any] = SecondaryLearner(__SCREAMING_SNAKE_CASE )
    # Train secondary learner
    _SCREAMING_SNAKE_CASE : Any = train_secondary_learner(
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , max_epochs=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , eval_freq=100 , igf_model_path=__SCREAMING_SNAKE_CASE , )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
# NOTE(review): the parameter list repeats one scrambled placeholder name
# (SyntaxError), and distinct locals (device, train_sampler, train_dataloader,
# num_train_epochs, global_step, context, model/lm_optimizer/lm_scheduler,
# start, outputs, do_backprop, predicted_q, threshold, lm_loss, test_perp, ...)
# were all collapsed to _SCREAMING_SNAKE_CASE, so the later reads of the named
# variables no longer resolve. Restore the original names before running.
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=1_000 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=recopy_gpta , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE="gpt2_finetuned.pt" , )-> Union[str, Any]:
    """Fine-tune GPT-2 with Information Gain Filtration: batches are built only from
    contexts whose predicted information gain passes the (decaying) threshold."""
    _SCREAMING_SNAKE_CASE : Tuple = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
    _SCREAMING_SNAKE_CASE : Union[str, Any] = RandomSampler(__SCREAMING_SNAKE_CASE )
    _SCREAMING_SNAKE_CASE : Tuple = DataLoader(__SCREAMING_SNAKE_CASE , sampler=__SCREAMING_SNAKE_CASE )
    _SCREAMING_SNAKE_CASE : Tuple = max_steps // (len(__SCREAMING_SNAKE_CASE )) + 1
    _SCREAMING_SNAKE_CASE : List[Any] = 0
    _SCREAMING_SNAKE_CASE : Any = torch.zeros((1, context_len) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )
    _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = recopy_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(__SCREAMING_SNAKE_CASE )
        secondary_learner.eval()
    _SCREAMING_SNAKE_CASE : Dict = []
    _SCREAMING_SNAKE_CASE : Optional[int] = 0
    _SCREAMING_SNAKE_CASE : Optional[Any] = []
    _SCREAMING_SNAKE_CASE : int = []
    # Compute the performance of the transformer model at the beginning
    _SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    test_perps.append(__SCREAMING_SNAKE_CASE )
    print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE )
    for epoch in range(int(__SCREAMING_SNAKE_CASE ) ):
        for step, example in enumerate(__SCREAMING_SNAKE_CASE ):
            torch.cuda.empty_cache()
            _SCREAMING_SNAKE_CASE : Any = random.randint(0 , example.size(2 ) - context_len - 1 )
            _SCREAMING_SNAKE_CASE : int = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            _SCREAMING_SNAKE_CASE : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
            _SCREAMING_SNAKE_CASE : List[str] = True
            if secondary_learner is not None:
                _SCREAMING_SNAKE_CASE : List[Any] = secondary_learner.forward(
                    torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ).unsqueeze(0 ) )[0].item()
                observed_qs.append(float(__SCREAMING_SNAKE_CASE ) )
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    _SCREAMING_SNAKE_CASE : Dict = -1
                if predicted_q < threshold:
                    _SCREAMING_SNAKE_CASE : List[str] = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu() ) )
                _SCREAMING_SNAKE_CASE : Union[str, Any] = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                _SCREAMING_SNAKE_CASE : Any = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    _SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                    test_perps.append(__SCREAMING_SNAKE_CASE )
                    print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE )
                # Break out of the loop after 60 batches
                if max_steps > 0 and global_step > 60:
                    break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE )
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
# NOTE(review): the parser object and function results were assigned to the
# scrambled _SCREAMING_SNAKE_CASE placeholder, so the subsequent
# ``parser.add_argument`` / positional calls no longer resolve; the argparse
# type/default/required placeholders (__SCREAMING_SNAKE_CASE) hide the original
# values. Restore from the upstream IGF example before running.
def lowerCamelCase_()-> Tuple:
    """CLI entry point: collect IGF pairs, train the secondary learner, then fine-tune GPT-2 with it."""
    _SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" )
    # Required parameters
    parser.add_argument(
        """--data_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The input data dir. Should contain data files for WikiText.""" , )
    parser.add_argument(
        """--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
    parser.add_argument(
        """--data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help=(
            """A jbl file containing tokenized data which can be split as objective dataset, """
            """train_dataset and test_dataset."""
        ) , )
    parser.add_argument(
        """--igf_data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , )
    parser.add_argument(
        """--output_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The output directory where the final fine-tuned model is stored.""" , )
    parser.add_argument(
        """--tokenizer_name""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
    parser.add_argument("""--seed""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A seed for reproducible training.""" )
    parser.add_argument(
        """--context_len""" , default=32 , type=__SCREAMING_SNAKE_CASE , help=(
            """The maximum total input sequence length after tokenization. Sequences longer """
            """than this will be truncated, sequences shorter will be padded."""
        ) , )
    parser.add_argument(
        """--size_objective_set""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""number of articles that are long enough to be used as our objective set""" , )
    parser.add_argument(
        """--eval_freq""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""secondary model evaluation is triggered at eval_freq""" )
    parser.add_argument("""--max_steps""" , default=1_000 , type=__SCREAMING_SNAKE_CASE , help="""To calculate training epochs""" )
    parser.add_argument(
        """--secondary_learner_batch_size""" , default=128 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data for secondary learner""" , )
    parser.add_argument(
        """--batch_size""" , default=16 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data of language model(gpt2) """ )
    parser.add_argument(
        """--eval_interval""" , default=10 , type=__SCREAMING_SNAKE_CASE , help=(
            """decay the selectivity of our secondary learner filter from"""
            """1 standard deviation above average to 1 below average after 10 batches"""
        ) , )
    parser.add_argument(
        """--number""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""The number of examples split to be used as objective_set/test_data""" )
    parser.add_argument(
        """--min_len""" , default=1_026 , type=__SCREAMING_SNAKE_CASE , help="""The minimum length of the article to be used as objective set""" )
    parser.add_argument(
        """--secondary_learner_max_epochs""" , default=15 , type=__SCREAMING_SNAKE_CASE , help="""number of epochs to train secondary learner""" )
    parser.add_argument("""--trim""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""truncate the example if it exceeds context length""" )
    parser.add_argument(
        """--threshold""" , default=1.0 , type=__SCREAMING_SNAKE_CASE , help=(
            """The threshold value used by secondary learner to filter the train_data and allow only"""
            """ informative data as input to the model"""
        ) , )
    parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=__SCREAMING_SNAKE_CASE , help="""finetuned_model_name""" )
    parser.add_argument(
        """--recopy_model""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , )
    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , )
    # Load train data for secondary learner
    _SCREAMING_SNAKE_CASE : Optional[int] = joblib.load("""data/IGF_values.jbl""" )
    # Train secondary learner
    _SCREAMING_SNAKE_CASE : int = training_secondary_learner(
        __SCREAMING_SNAKE_CASE , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="""igf_model.pt""" , )
    # load pretrained gpt2 model
    _SCREAMING_SNAKE_CASE : List[Any] = GPTaLMHeadModel.from_pretrained("""gpt2""" )
    set_seed(42 )
    # Generate train and test data to train and evaluate gpt2 model
    _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = generate_datasets(
        context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE )
    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=__SCREAMING_SNAKE_CASE , secondary_learner=__SCREAMING_SNAKE_CASE , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , )
| 635 | """simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
lowerCAmelCase_ = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    """Guess the model family ("rag_token", "rag_sequence" or "bart") from a checkpoint name.

    Returns None when no family substring matches. Fix: the function name and
    parameter were scrambled; ``main`` calls ``infer_model_type`` and the body
    reads ``model_name_or_path``.
    """
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Return the best value of ``metric_fn(prediction, gt)`` over all gold answers."""
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    """Compute exact-match and F1 of predictions against gold answers and log both.

    ``args.gold_data_mode == "qa"`` expects a TSV of question/answer-list pairs
    (the answer list parsed with ast.literal_eval); any other mode treats each
    gold line as the single reference. Fix: function/parameter/local names were
    scrambled (``main`` dispatches to ``get_scores``); accumulators restored.
    """
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(fa_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    """Compute retrieval precision@k (tab-separated provenance titles per line) and log it.

    Fix: function/parameter/local names were scrambled (``main`` dispatches to
    ``get_precision_at_k``); the ``em``/``total`` accumulators restored.
    """
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    """Retrieve documents for a batch of questions and return one tab-joined
    string of (de-quoted) document titles per question.

    Fix: function/parameter/local names were scrambled (``main`` dispatches to
    ``evaluate_batch_retrieval``); locals restored from the expressions that
    read them.
    """

    def strip_title(title):
        # Titles in the index may be wrapped in literal double quotes.
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.floataa).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_eae(args, rag_model, questions):
    """Generate answers for a batch of questions with the (RAG/BART) model.

    Fix: function/parameter/local names were scrambled (``main`` dispatches to
    ``evaluate_batch_eae``). NOTE(review): ``early_stopping`` was a scrambled
    placeholder; False matches the upstream script — confirm.
    """
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    """Build and parse the evaluation CLI, attaching the torch device to the result.

    Fix: function name and every local were scrambled; scrambled ``type=``/
    ``default=``/``required=`` placeholders restored (str/None/True per the
    upstream evaluation script — confirm against it).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name",
        default=None,
        choices=["exact", "compressed", "legacy"],
        type=str,
        help="RAG model retriever type",
    )
    parser.add_argument(
        "--index_path",
        default=None,
        type=str,
        help="Path to the retrieval index",
    )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set",
        default=None,
        type=str,
        required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path",
        default=None,
        type=str,
        required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument(
        "--eval_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--recalculate",
        help="Recalculate predictions even if the prediction file exists",
        action="store_true",
    )
    parser.add_argument(
        "--num_beams",
        default=4,
        type=int,
        help="Number of beams to be used when generating answers",
    )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions",
        action="store_true",
        help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs",
        action="store_true",
        help="If True, prints docs retried while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    """Run e2e or retrieval evaluation for one or more checkpoints.

    Writes batched predictions to ``args.predictions_path`` and then scores
    them. Fix: function/parameter/local names and the ``model_kwargs`` key
    assignments were scrambled; restored from the expressions that read them
    (``infer_model_type``, ``get_scores``/``get_precision_at_k``,
    ``evaluate_batch_eae``/``evaluate_batch_retrieval`` are this file's
    sibling functions).
    """
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info(" Batch size = %d", args.eval_batch_size)
        logger.info(" Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
lowerCAmelCase_ = get_args()
main(args)
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : Optional[Any] = {"""configuration_timm_backbone""": ["""TimmBackboneConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict = ["""TimmBackbone"""]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
lowerCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
# Module logger; the function below calls ``logger.info``, so the scrambled
# assignment target raised NameError.
logger = getLogger(__name__)
def SCREAMING_SNAKE_CASE( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 8 , __UpperCamelCase = 10_24 , __UpperCamelCase="val" , __UpperCamelCase=None , __UpperCamelCase=False , __UpperCamelCase="summarization" , __UpperCamelCase=None , __UpperCamelCase=1 , __UpperCamelCase = None , __UpperCamelCase="" , **__UpperCamelCase , ) -> Dict:
    """Run seq2seq generation over this rank's shard of a dataset and dump the
    predictions to ``rank_<local_rank>_output.json`` inside the save dir.

    NOTE(review): machine-obfuscated. Every parameter is named
    ``__UpperCamelCase`` (a duplicate-argument SyntaxError) and every local is
    rebound to ``a__``, while later lines read the original names
    (``local_rank``, ``save_dir``, ``model``, ``ds`` …). The comments below
    describe the evident intent; the code cannot run as written.
    """
    # Presumably: coerce data_dir to str — confirm against the caller.
    a__ : Any = str(__UpperCamelCase )
    assert local_rank is not None
    # Join the already-launched distributed group; each rank evaluates one shard.
    torch.distributed.init_process_group(backend="nccl" , rank=__UpperCamelCase )
    a__ : int = Path(__UpperCamelCase )
    # Per-rank output file; the rank files are merged later by the caller.
    a__ : List[Any] = save_dir.joinpath(F'rank_{local_rank}_output.json' )
    torch.cuda.set_device(__UpperCamelCase )
    a__ : Any = AutoModelForSeqaSeqLM.from_pretrained(__UpperCamelCase ).cuda()
    if fpaa:
        a__ : Optional[int] = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(__UpperCamelCase , __UpperCamelCase )  # update config with task specific params
    a__ : Optional[Any] = generate_kwargs.pop("num_beams" , model.config.num_beams )  # AttributeError risk?
    if num_return_sequences > num_beams:
        # Beam search must produce at least num_return_sequences candidates.
        a__ : Optional[Any] = num_return_sequences
    a__ : Any = AutoTokenizer.from_pretrained(__UpperCamelCase )
    logger.info(F'Inferred tokenizer type: {tokenizer.__class__}' )  # if this is wrong, check config.model_type.
    if max_source_length is None:
        a__ : Optional[Any] = tokenizer.model_max_length
    if prefix is None:
        a__ : str = prefix or getattr(model.config , "prefix" , "" ) or ""
    a__ : str = SeqaSeqDataset(
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , max_target_length=10_24 , type_path=__UpperCamelCase , n_obs=__UpperCamelCase , prefix=__UpperCamelCase , **__UpperCamelCase , )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    a__ : Any = ds.make_sortish_sampler(__UpperCamelCase , distributed=__UpperCamelCase , add_extra_examples=__UpperCamelCase , shuffle=__UpperCamelCase )
    a__ : Optional[int] = DataLoader(__UpperCamelCase , sampler=__UpperCamelCase , batch_size=__UpperCamelCase , collate_fn=ds.collate_fn )
    a__ : Optional[int] = []
    for batch in tqdm(__UpperCamelCase ):
        a__ : Optional[Any] = model.generate(
            input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=__UpperCamelCase , num_beams=__UpperCamelCase , **__UpperCamelCase , )
        a__ : Any = tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase )
        a__ : Dict = batch["ids"]
        if num_return_sequences > 1:
            a__ : int = chunks(__UpperCamelCase , __UpperCamelCase )  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(__UpperCamelCase ):
            # Pair each prediction with its dataset id so results can be re-sorted later.
            results.append({"pred": pred, "id": ids[i].item()} )
    save_json(__UpperCamelCase , __UpperCamelCase )
    return results, sampler.num_replicas
def SCREAMING_SNAKE_CASE( ) -> List[str]:
    """CLI entry point: parse args, run distributed generation on this rank,
    then (on rank 0) gather all rank files, score them and save metrics.

    NOTE(review): machine-obfuscated. Every local is rebound to ``a__`` while
    later lines read the intended names (``parser``, ``args``, ``preds``,
    ``metrics`` …), and ``type=__UpperCamelCase`` in the add_argument calls is
    an undefined name (originally ``str``/``int``). Restore the original
    names before running.
    """
    a__ : List[str] = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
    parser.add_argument("--data_dir" , type=__UpperCamelCase , help="like cnn_dm/test.source" )
    parser.add_argument(
        "--model_name" , type=__UpperCamelCase , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , )
    parser.add_argument("--save_dir" , type=__UpperCamelCase , help="where to save" , default="tmp_gen" )
    parser.add_argument("--max_source_length" , type=__UpperCamelCase , default=__UpperCamelCase )
    parser.add_argument(
        "--type_path" , type=__UpperCamelCase , default="test" , help="which subset to evaluate typically train/val/test" )
    parser.add_argument("--task" , type=__UpperCamelCase , default="summarization" , help="used for task_specific_params + metrics" )
    parser.add_argument("--bs" , type=__UpperCamelCase , default=8 , required=__UpperCamelCase , help="batch size" )
    parser.add_argument(
        "--local_rank" , type=__UpperCamelCase , default=-1 , required=__UpperCamelCase , help="should be passed by distributed.launch" )
    parser.add_argument(
        "--n_obs" , type=__UpperCamelCase , default=__UpperCamelCase , required=__UpperCamelCase , help="How many observations. Defaults to all." )
    parser.add_argument(
        "--num_return_sequences" , type=__UpperCamelCase , default=1 , required=__UpperCamelCase , help="How many sequences to return" )
    parser.add_argument(
        "--sync_timeout" , type=__UpperCamelCase , default=6_00 , required=__UpperCamelCase , help="How long should master process wait for other processes to finish." , )
    parser.add_argument("--src_lang" , type=__UpperCamelCase , default=__UpperCamelCase , required=__UpperCamelCase )
    parser.add_argument("--tgt_lang" , type=__UpperCamelCase , default=__UpperCamelCase , required=__UpperCamelCase )
    parser.add_argument(
        "--prefix" , type=__UpperCamelCase , required=__UpperCamelCase , default=__UpperCamelCase , help="will be added to the begininng of src examples" )
    parser.add_argument("--fp16" , action="store_true" )
    parser.add_argument("--debug" , action="store_true" )
    a__ : str = time.time()
    # parse_known_args: unknown flags become kwargs forwarded to model.generate.
    a__ , a__ : Dict = parser.parse_known_args()
    a__ : Optional[Any] = parse_numeric_n_bool_cl_kwargs(__UpperCamelCase )
    if generate_kwargs and args.local_rank <= 0:
        print(F'parsed the following generate kwargs: {generate_kwargs}' )
    # Temporary directory where each rank drops its partial JSON file.
    a__ : List[str] = Path(args.save_dir + "_tmp" )
    Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )  # this handles locking.
    a__ : List[Any] = list(json_save_dir.glob("rank_*.json" ) )
    if intermediate_files:
        raise ValueError(F'Found files at {json_save_dir} please move or remove them.' )
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    a__ : List[Any] = {}
    if args.src_lang is not None:
        a__ : str = args.src_lang
    if args.tgt_lang is not None:
        a__ : int = args.tgt_lang
    Path(args.save_dir ).mkdir(exist_ok=__UpperCamelCase )
    a__ , a__ : Tuple = eval_data_dir(
        args.data_dir , __UpperCamelCase , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=__UpperCamelCase , **__UpperCamelCase , )
    if args.local_rank <= 0:
        # Only rank 0 aggregates, scores and writes the final artifacts.
        a__ : str = Path(args.save_dir )
        save_dir.mkdir(exist_ok=__UpperCamelCase )
        a__ : Optional[int] = gather_results_from_each_node(__UpperCamelCase , __UpperCamelCase , args.sync_timeout )
        a__ : Tuple = combine_partial_results(__UpperCamelCase )
        if args.num_return_sequences > 1:
            # Pseudolabeling mode: dump all candidate sequences, skip scoring.
            a__ : Optional[int] = save_dir.joinpath("pseudolabel_results.json" )
            print(F'Saving aggregated results at {save_path}, intermediate in {json_save_dir}/' )
            save_json(__UpperCamelCase , __UpperCamelCase )
            return
        a__ : Any = Path(args.data_dir ).joinpath(args.type_path + ".target" )
        with open(__UpperCamelCase ) as f:
            # Truncate labels to however many predictions were produced (n_obs).
            a__ : Optional[int] = [x.rstrip() for x in f.readlines()][: len(__UpperCamelCase )]
        # Calculate metrics, save metrics, and save _generations.txt
        a__ : Any = "translation" in args.task
        a__ : List[Any] = calculate_bleu if calc_bleu else calculate_rouge
        a__ : List[Any] = "bleu" if calc_bleu else "rouge"
        a__ : Dict = score_fn(__UpperCamelCase , __UpperCamelCase )
        a__ : int = len(__UpperCamelCase )
        a__ : Union[str, Any] = time.time() - start_time
        a__ : List[Any] = round(runtime / metrics["n_obs"] , 4 )
        a__ : List[str] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        a__ : int = save_dir.joinpath(F'{args.type_path}_{metric_name}.json' )
        save_json(__UpperCamelCase , __UpperCamelCase , indent=__UpperCamelCase )
        print(__UpperCamelCase )
        write_txt_file(__UpperCamelCase , save_dir.joinpath(F'{args.type_path}_generations.txt' ) )
        if args.debug:
            write_txt_file(__UpperCamelCase , save_dir.joinpath(F'{args.type_path}.target' ) )
        else:
            # Clean up the per-rank intermediate directory.
            shutil.rmtree(__UpperCamelCase )
def SCREAMING_SNAKE_CASE( __UpperCamelCase ) -> List:
    """Flatten per-rank result lists and return predictions ordered by example id.

    Args:
        __UpperCamelCase: list of per-rank result lists; each record is a dict
            of the form ``{"pred": str, "id": int}``.

    Returns:
        The ``"pred"`` strings sorted by their ``"id"`` (original dataset order).
    """
    # Fixes vs. original: the loop extended ``records`` with the outer list
    # itself instead of each per-rank sublist, and the sort key lambda read an
    # undefined name ``x``.
    records = []
    for partial_result in __UpperCamelCase:
        records.extend(partial_result)
    records = sorted(records, key=lambda record: record["id"])
    return [record["pred"] for record in records]
def SCREAMING_SNAKE_CASE( save_dir , num_replicas , timeout ) -> List[Dict[str, List]]:
    """Poll ``save_dir`` until every rank has written its ``rank_*.json`` file.

    Args:
        save_dir: ``pathlib.Path`` directory the ranks write their partial
            result files into.
        num_replicas: number of rank files to wait for.
        timeout: maximum number of seconds to keep polling.

    Returns:
        The parsed contents of all rank files (one list entry per rank).

    Raises:
        TimeoutError: if not all ranks finished within ``timeout`` seconds.
    """
    # Fixes vs. original: the three parameters were all named
    # ``__UpperCamelCase`` (a duplicate-argument SyntaxError) and ``lmap`` was
    # called with the wrong arguments instead of ``lmap(load_json, json_files)``.
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            return lmap(load_json, json_files)
        except JSONDecodeError:
            # A rank is still mid-write; retry until the file parses cleanly.
            continue
    raise TimeoutError("Rank 0 gave up on waiting for other processes")
# Unreachable
if __name__ == "__main__":
    # Usage for MT:
    # NOTE(review): ``run_generate`` is undefined as written — the entry point
    # above was renamed to ``SCREAMING_SNAKE_CASE`` by the obfuscation.
    run_generate()
| 191 | 0 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def _lowerCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = list(__lowerCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = list(__lowerCamelCase )
__SCREAMING_SNAKE_CASE : int = 0
for i in range(len(__lowerCamelCase ) ):
if lista[i] != lista[i]:
count += 1
__SCREAMING_SNAKE_CASE : int = "_"
if count > 1:
return False
else:
return "".join(__lowerCamelCase )
def _lowerCAmelCase ( __lowerCamelCase : list[str] ):
    """Iteratively merge implicants until no merge is possible; return the
    prime implicants.

    NOTE(review): machine-obfuscated — the parameter is ``__lowerCamelCase``
    while the body reads ``binary``/``checka``/``temp``/``k``/``pi``, and the
    call target ``compare_string`` was renamed away. ``temp.append("X")``
    looks like it should append the merged string ``k``; confirm against the
    upstream algorithm before trusting behavior.
    """
    __SCREAMING_SNAKE_CASE : Optional[int] = []  # pi: prime implicants collected so far
    while True:
        # "$" marks an implicant that was not merged in this round.
        __SCREAMING_SNAKE_CASE : Optional[Any] = ["$"] * len(__lowerCamelCase )
        __SCREAMING_SNAKE_CASE : int = []  # implicants for the next round
        for i in range(len(__lowerCamelCase ) ):
            for j in range(i + 1 , len(__lowerCamelCase ) ):
                __SCREAMING_SNAKE_CASE : str = compare_string(binary[i] , binary[j] )
                if k is False:
                    __SCREAMING_SNAKE_CASE : Optional[Any] = "*"
                    __SCREAMING_SNAKE_CASE : Union[str, Any] = "*"
                    temp.append("X" )
        for i in range(len(__lowerCamelCase ) ):
            # Anything never marked merged is a prime implicant.
            if checka[i] == "$":
                pi.append(binary[i] )
        if len(__lowerCamelCase ) == 0:
            return pi
        # Deduplicate the merged implicants and repeat.
        __SCREAMING_SNAKE_CASE : List[str] = list(set(__lowerCamelCase ) )
def _lowerCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : Sequence[float] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = []
for minterm in minterms:
__SCREAMING_SNAKE_CASE : Union[str, Any] = ""
for _ in range(__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : str = str(minterm % 2 ) + string
minterm //= 2
temp.append(__lowerCamelCase )
return temp
def _lowerCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = list(__lowerCamelCase )
__SCREAMING_SNAKE_CASE : List[str] = list(__lowerCamelCase )
__SCREAMING_SNAKE_CASE : Dict = 0
for i in range(len(__lowerCamelCase ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def _lowerCAmelCase ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[str] ):
    """Select the essential prime implicants from a coverage chart.

    NOTE(review): machine-obfuscated — both parameters share one name (a
    duplicate-argument SyntaxError) and every assignment targets
    ``__SCREAMING_SNAKE_CASE`` while later lines read ``chart``, ``select``,
    ``temp``, ``count_n``, ``max_n``, ``rem``, ``prime_implicants``. Comments
    describe the evident intent (chart rows = implicants, columns = minterms).
    """
    __SCREAMING_SNAKE_CASE : Union[str, Any] = []  # temp: chosen essential implicants
    __SCREAMING_SNAKE_CASE : Dict = [0] * len(__lowerCamelCase )  # select flags per implicant
    for i in range(len(chart[0] ) ):
        # Find columns (minterms) covered by exactly one implicant — essential.
        __SCREAMING_SNAKE_CASE : Optional[Any] = 0
        __SCREAMING_SNAKE_CASE : Any = -1
        for j in range(len(__lowerCamelCase ) ):
            if chart[j][i] == 1:
                count += 1
                __SCREAMING_SNAKE_CASE : Optional[Any] = j
        if count == 1:
            __SCREAMING_SNAKE_CASE : str = 1
    for i in range(len(__lowerCamelCase ) ):
        if select[i] == 1:
            # Zero out every column this essential implicant covers.
            for j in range(len(chart[0] ) ):
                if chart[i][j] == 1:
                    for k in range(len(__lowerCamelCase ) ):
                        __SCREAMING_SNAKE_CASE : Tuple = 0
                    temp.append(prime_implicants[i] )
    while True:
        # Greedy cover for the remaining minterms: pick the row covering most.
        __SCREAMING_SNAKE_CASE : int = 0
        __SCREAMING_SNAKE_CASE : str = -1
        __SCREAMING_SNAKE_CASE : int = 0
        for i in range(len(__lowerCamelCase ) ):
            __SCREAMING_SNAKE_CASE : Any = chart[i].count(1 )
            if count_n > max_n:
                __SCREAMING_SNAKE_CASE : int = count_n
                __SCREAMING_SNAKE_CASE : List[Any] = i
        if max_n == 0:
            # Nothing left to cover.
            return temp
        temp.append(prime_implicants[rem] )
        for i in range(len(chart[0] ) ):
            # Remove the newly covered minterms from every row.
            if chart[rem][i] == 1:
                for j in range(len(__lowerCamelCase ) ):
                    __SCREAMING_SNAKE_CASE : Optional[Any] = 0
def _lowerCAmelCase(prime_implicants: list[str], binary: list[str]):
    """Build the prime-implicant coverage chart.

    ``chart[i][j] == 1`` iff prime implicant ``i`` covers minterm ``j``, i.e.
    they differ in exactly as many positions as implicant ``i`` has ``'_'``
    wildcards (checked by ``is_for_table``).

    Fixes vs. original: both parameters were named ``__lowerCamelCase`` (a
    duplicate-argument SyntaxError) while the body read ``prime_implicants``
    and ``binary``.
    """
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def _lowerCAmelCase ( ):
    """Interactive driver: read the variable count and minterms, then print
    the prime implicants and the essential prime implicants.

    NOTE(review): calls ``decimal_to_binary``/``check``/
    ``prime_implicant_chart``/``selection``, but every def in this file was
    renamed to ``_lowerCAmelCase`` by the obfuscation, so these call targets
    (and ``float(__lowerCamelCase)``, originally ``float(x)``) are undefined
    as written.
    """
    __SCREAMING_SNAKE_CASE : str = int(input("Enter the no. of variables\n" ) )
    __SCREAMING_SNAKE_CASE : int = [
        float(__lowerCamelCase )
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split()
    ]
    __SCREAMING_SNAKE_CASE : Dict = decimal_to_binary(__lowerCamelCase , __lowerCamelCase )
    __SCREAMING_SNAKE_CASE : Tuple = check(__lowerCamelCase )
    print("Prime Implicants are:" )
    print(__lowerCamelCase )
    __SCREAMING_SNAKE_CASE : Optional[int] = prime_implicant_chart(__lowerCamelCase , __lowerCamelCase )
    __SCREAMING_SNAKE_CASE : Union[str, Any] = selection(__lowerCamelCase , __lowerCamelCase )
    print("Essential Prime Implicants are:" )
    print(__lowerCamelCase )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): ``main`` is undefined as written — the driver above was
    # renamed to ``_lowerCAmelCase`` by the obfuscation.
    main()
| 707 |
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
_lowerCamelCase = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE (LayoutLMvaImageProcessor ):
    """Deprecated alias of ``LayoutLMvaImageProcessor``.

    Kept for backward compatibility: emits a ``FutureWarning`` on
    construction and otherwise behaves exactly like the image processor it
    subclasses.
    """

    def __init__( self , *args , **kwargs ) -> None:
        # Fixes vs. original: ``*UpperCamelCase, **UpperCamelCase`` was a
        # duplicate-argument SyntaxError, the base class ``UpperCamelCase``
        # was undefined (intended: the imported LayoutLMvaImageProcessor),
        # and an undefined name was passed as the warning category.
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 447 | 0 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def a(flax_key_tuple, flax_tensor):
    """Map a Flax parameter ``(key tuple, tensor)`` to its PyTorch equivalent.

    - 3-D ``"kernel"``: expert layer — rename to ``"weight"`` and permute the
      tensor to ``(experts, out, in)``.
    - other ``"kernel"``: linear layer — rename to ``"weight"`` and transpose.
    - ``"scale"`` / ``"embedding"``: rename to ``"weight"``; tensor unchanged.

    Returns the (possibly renamed) key tuple and (possibly transformed) tensor.

    Fixes vs. original: both parameters were named ``snake_case__`` (a
    duplicate-argument SyntaxError), and the linear-layer guard joined the
    wrong object — it joins the key tuple, as in the upstream conversion
    script.
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def a ( snake_case__: Optional[int] , snake_case__: Optional[int] , snake_case__: Any ):
    """Split a flattened tensorstore checkpoint key into (real layer name,
    remaining key path, content), resolving kvstore paths/drivers.

    NOTE(review): machine-obfuscated — the three parameters share one name (a
    duplicate-argument SyntaxError; presumably (layer, checkpoint_info,
    switch_checkpoint_path)) and every local is rebound to ``lowercase_``
    while later lines read ``split_layer``, ``curr_real_layer_name``,
    ``content``.
    """
    if "metadata" in layer:
        # Key addresses the layer's metadata sub-entry.
        lowercase_ = layer.split('''metadata''' )
        lowercase_ = ''''''.join(split_layer[0] )[:-1]
        lowercase_ = [tuple(('''metadata''' + split_layer[1]).split('''/''' ) )]
    elif "kvstore" in layer:
        # Key addresses the layer's kvstore sub-entry.
        lowercase_ = layer.split('''kvstore''' )
        lowercase_ = ''''''.join(split_layer[0] )[:-1]
        lowercase_ = [tuple(('''kvstore''' + split_layer[1]).split('''/''' ) )]
    else:
        # Plain key: everything up to the last '/' is the layer name.
        lowercase_ = layer.split('''/''' )
        lowercase_ = '''/'''.join(split_layer[:-1] )
        lowercase_ = (split_layer[-1],)
    if "kvstore/path" in layer:
        # Rewrite the stored path to point inside the local checkpoint dir.
        lowercase_ = F'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
    elif "kvstore/driver" in layer:
        # Force the local-file driver.
        lowercase_ = '''file'''
    else:
        lowercase_ = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def a ( snake_case__: Optional[Any] , snake_case__: Any ):
    """Rename the keys of a state-dict block and serialize it with
    ``torch.save``.

    NOTE(review): machine-obfuscated — both parameters are ``snake_case__``
    (a duplicate-argument SyntaxError; presumably (current_block,
    save_path)), and every local is rebound to ``lowercase_`` while the loop
    reads ``current_block``/``new_current_block``. The per-key string
    transformation applied inside the loop was lost in the mangling, so the
    intended key rewrite cannot be recovered from this file alone.
    """
    lowercase_ = rename_keys(snake_case__ )
    lowercase_ = {}
    for k, v in current_block.items():
        lowercase_ = v
    lowercase_ = new_current_block
    torch.save(snake_case__ , snake_case__ )
def a ( snake_case__: Optional[int] , snake_case__: Tuple , snake_case__: Union[str, Any] , snake_case__: Dict , snake_case__: str = WEIGHTS_NAME ):
    """Stream a T5X/Switch tensorstore checkpoint into sharded PyTorch
    ``.bin`` files plus a weight-map index, never holding the whole model in
    memory.

    NOTE(review): machine-obfuscated — the five parameters share one name (a
    duplicate-argument SyntaxError; presumably (switch_checkpoint_path,
    dump_path, max_shard_size, dtype, weights_name)) and every local is
    rebound to ``lowercase_`` while later lines read the intended names
    (``checkpoint_info``, ``all_layers``, ``current_block`` …).
    """
    lowercase_ = convert_file_size_to_int(snake_case__ )
    lowercase_ = []  # sharded_state_dicts: key sets per written shard
    lowercase_ = {}  # current_block: weights accumulated for the next shard
    lowercase_ = 0   # current_block_size (bytes)
    lowercase_ = 0   # total_size (bytes)
    os.makedirs(snake_case__ , exist_ok=snake_case__ )
    with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' , '''rb''' ) as fp:
        # The msgpack checkpoint header describes every tensorstore entry.
        lowercase_ = serialization.msgpack_restore(fp.read() )['''optimizer''']['''target''']
        lowercase_ = flatten_dict(snake_case__ , sep='''/''' )
    lowercase_ = {}
    for layer in checkpoint_info.keys():
        # Group the flattened keys back into one tensorstore spec per layer.
        lowercase_ , lowercase_ , lowercase_ = get_key_and_tensorstore_dict(
            snake_case__ , snake_case__ , snake_case__ )
        if curr_real_layer_name in all_layers:
            lowercase_ = content
        else:
            lowercase_ = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        lowercase_ = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
        lowercase_ = torch.tensor(snake_case__ )
        lowercase_ = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
        # use the renaming pattern from the small conversion scripts
        lowercase_ , lowercase_ = rename_base_flax_keys(tuple(key.split('''/''' ) ) , snake_case__ )
        lowercase_ = '''/'''.join(snake_case__ )
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            lowercase_ = os.path.join(
                snake_case__ , weights_name.replace('''.bin''' , F'''-{len(snake_case__ )+1:05d}-of-???.bin''' ) )
            rename_and_save_block(snake_case__ , snake_case__ )
            sharded_state_dicts.append(current_block.keys() )
            del current_block
            lowercase_ = {}
            lowercase_ = 0
        # Cast to the requested dtype and accumulate into the current shard.
        lowercase_ = raw_weights.to(getattr(snake_case__ , snake_case__ ) )
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    lowercase_ = os.path.join(snake_case__ , weights_name.replace('''.bin''' , F'''-{len(snake_case__ )+1:05d}-of-???.bin''' ) )
    rename_and_save_block(snake_case__ , snake_case__ )
    sharded_state_dicts.append(current_block.keys() )
    # If we only have one shard, we return it
    if len(snake_case__ ) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    lowercase_ = {}  # weight_map: parameter name -> shard file
    lowercase_ = {}  # shards: shard file -> key set
    for idx, shard in enumerate(snake_case__ ):
        # Rename the provisional "-of-???" files now that the total is known.
        lowercase_ = weights_name.replace(
            '''.bin''' , F'''-{idx+1:05d}-of-{len(snake_case__ ):05d}.bin''' ) # len(sharded_state_dicts):05d}
        lowercase_ = os.path.join(snake_case__ , weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-???.bin''' ) )
        os.rename(snake_case__ , os.path.join(snake_case__ , snake_case__ ) )
        lowercase_ = shard
        for key in shard:
            lowercase_ = shard_file
    # Add the metadata
    lowercase_ = {'''total_size''': total_size}
    lowercase_ = {'''metadata''': metadata, '''weight_map''': weight_map}
    with open(os.path.join(snake_case__ , snake_case__ ) , '''w''' , encoding='''utf-8''' ) as f:
        lowercase_ = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + '''\n'''
        f.write(snake_case__ )
    return metadata, index
if __name__ == "__main__":
    # NOTE(review): machine-obfuscated — the parser and parsed args are both
    # bound to ``__a`` while the calls below read ``parser``/``args``, and
    # ``args.switch_tax_checkpoint_path`` is a mangling of
    # ``switch_t5x_checkpoint_path``. ``shard_on_the_fly`` was also renamed
    # to ``a`` above.
    __a = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--switch_t5x_checkpoint_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
        type=str,
        required=False,
        help='Path to a directory containing a folder per layer. Follows the original Google format.',
    )
    parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
    parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
        type=str,
        required=False,
        help='Path to the output pytorch model.',
    )
    __a = parser.parse_args()
    shard_on_the_fly(
        args.switch_tax_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def a ( ):
    """Manual sanity check: convert switch-base-8, reload it with
    ``device_map='auto'`` and generate from a fill-in-the-blank prompt.

    NOTE(review): machine-obfuscated — every local is rebound to
    ``lowercase_`` while later lines read ``config``, ``model``,
    ``tokenizer``, ``input_ids``, ``out``; the hard-coded paths are
    developer-specific scratch locations.
    """
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer

    lowercase_ = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''' )
    config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''' )
    lowercase_ = SwitchTransformersForConditionalGeneration.from_pretrained(
        '''/home/arthur_huggingface_co/transformers/switch_converted''' , device_map='''auto''' )
    lowercase_ = TaTokenizer.from_pretrained('''t5-small''' )
    lowercase_ = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
    lowercase_ = tokenizer(snake_case__ , return_tensors='''pt''' ).input_ids
    lowercase_ = model.generate(snake_case__ , decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
| 97 |
from __future__ import annotations
def a(matrix: list[list[int]]):
    """Return the minimum path cost from the top-left to the bottom-right of
    ``matrix``, moving only right or down.

    Mutates ``matrix`` in place into a table of cumulative minimum costs
    (classic dynamic-programming "minimum path sum").

    Fix vs. original: the body read ``matrix`` while the parameter had been
    renamed to ``snake_case__``, a guaranteed NameError.
    """
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 97 | 1 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class lowercase_ :
    # Mock model with a contiguous forward signature; consumed by the
    # ensure_valid_input test below.
    # NOTE(review): obfuscated — the three parameters all share the name
    # ``lowercase_`` (a duplicate-argument SyntaxError); presumably the
    # original signature was (input_ids, token_type_ids, attention_mask).
    def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ ):
        return None
class lowercase_ :
    # Mock model whose forward signature interleaves a non-input parameter;
    # consumed by the ensure_valid_input test below.
    # NOTE(review): obfuscated — the four parameters all share the name
    # ``lowercase_`` (a duplicate-argument SyntaxError); presumably the
    # original signature was (input_ids, some_other_args, token_type_ids,
    # attention_mask).
    def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
        return None
class lowercase_ ( unittest.TestCase ):
    """ONNX graph-conversion tests: export, quantization, dynamic-axis shape
    inference and forward-signature input validation.

    NOTE(review): machine-obfuscated — the loops below read
    ``OnnxExportTestCase.MODEL_TO_TEST`` although this class is named
    ``lowercase_`` and the list attribute ``_lowerCamelCase``; nearly every
    argument was likewise mangled to ``lowercase_`` and several methods
    declare duplicate parameter names (a SyntaxError). Comments describe the
    evident intent.
    """

    _lowerCamelCase = [
        # (model_name, model_kwargs)
        ('bert-base-cased', {}),
        ('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def UpperCamelCase ( self ):
        # Export every registered model with the TensorFlow backend, opset 12.
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(lowercase_ , "tf" , 12 , **lowercase_ )

    @require_torch
    @slow
    def UpperCamelCase ( self ):
        # Export every registered model with the PyTorch backend, opset 12.
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(lowercase_ , "pt" , 12 , **lowercase_ )

    @require_torch
    @slow
    def UpperCamelCase ( self ):
        # Export a tiny randomly-initialized BERT built from a temp vocab file.
        from transformers import BertModel

        _snake_case : Dict = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words''']
        with NamedTemporaryFile(mode="w+t" ) as vocab_file:
            vocab_file.write("\n".join(lowercase_ ) )
            vocab_file.flush()
            _snake_case : List[str] = BertTokenizerFast(vocab_file.name )
        with TemporaryDirectory() as bert_save_dir:
            _snake_case : Any = BertModel(BertConfig(vocab_size=len(lowercase_ ) ) )
            model.save_pretrained(lowercase_ )
            self._test_export(lowercase_ , "pt" , 12 , lowercase_ )

    @require_tf
    @slow
    def UpperCamelCase ( self ):
        # Quantize each TF export and check the file did not grow.
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            _snake_case : Tuple = self._test_export(lowercase_ , "tf" , 12 , **lowercase_ )
            _snake_case : int = quantize(Path(lowercase_ ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(lowercase_ ).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model" )

    @require_torch
    @slow
    def UpperCamelCase ( self ):
        # Quantize each PyTorch export and check the file did not grow.
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            _snake_case : Any = self._test_export(lowercase_ , "pt" , 12 , **lowercase_ )
            _snake_case : List[str] = quantize(lowercase_ )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(lowercase_ ).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model" )

    def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=None , **lowercase_ ):
        # Helper: convert a model to ONNX under a temp dir and return the path.
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                _snake_case : Union[str, Any] = Path(lowercase_ ).joinpath("model.onnx" )
            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()
            # Export
            convert(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , **lowercase_ )
            return path
        except Exception as e:
            self.fail(lowercase_ )

    @require_torch
    @require_tokenizers
    @slow
    def UpperCamelCase ( self ):
        # Dynamic-axis shape inference on a tiny random PyTorch BERT.
        from transformers import BertModel

        _snake_case : Dict = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
        _snake_case : Dict = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
        self._test_infer_dynamic_axis(lowercase_ , lowercase_ , "pt" )

    @require_tf
    @require_tokenizers
    @slow
    def UpperCamelCase ( self ):
        # Dynamic-axis shape inference on a tiny random TensorFlow BERT.
        from transformers import TFBertModel

        _snake_case : Optional[Any] = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
        _snake_case : Optional[int] = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
        self._test_infer_dynamic_axis(lowercase_ , lowercase_ , "tf" )

    def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ ):
        # Helper: check infer_shapes reports the expected variables and axes.
        _snake_case : Tuple = FeatureExtractionPipeline(lowercase_ , lowercase_ )
        _snake_case : List[str] = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1''']
        _snake_case : Optional[int] = infer_shapes(lowercase_ , lowercase_ )
        # Assert all variables are present
        self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] , lowercase_ )
        self.assertSequenceEqual(variable_names[3:] , lowercase_ )
        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} )
        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} )
        self.assertDictEqual(shapes["output_1"] , {0: "batch"} )

    def UpperCamelCase ( self ):
        # ensure_valid_input must drop/reorder args to match the forward signature.
        _snake_case : Any = ['''input_ids''', '''attention_mask''', '''token_type_ids''']
        _snake_case : List[str] = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]}
        _snake_case : Optional[int] = ensure_valid_input(FuncContiguousArgs() , lowercase_ , lowercase_ )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(lowercase_ ) , 3 )
        # Should have exactly the same input names
        self.assertEqual(set(lowercase_ ) , set(lowercase_ ) )
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(lowercase_ , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) )
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        _snake_case : Optional[int] = ensure_valid_input(FuncNonContiguousArgs() , lowercase_ , lowercase_ )
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(lowercase_ ) , 1 )
        self.assertEqual(len(lowercase_ ) , 1 )
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0] , tokens["input_ids"] )
        self.assertEqual(ordered_input_names[0] , "input_ids" )
def UpperCamelCase ( self ):
_snake_case : int = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" )
self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() ) | 719 | from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP | 580 | 0 |
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
__lowerCamelCase : Optional[Any] = logging.getLogger()
def UpperCAmelCase_(output_dir):
    """Load and return the metrics dict stored in ``<output_dir>/all_results.json``.

    Args:
        output_dir: directory the training script wrote its results into.

    Returns:
        The parsed JSON object (a dict of metric name -> value).

    Raises:
        ValueError: if ``all_results.json`` does not exist in ``output_dir``.
    """
    # Fixes vs. original: every local was rebound to ``lowercase`` so the
    # existence check / open targeted the directory instead of the file, the
    # error f-string read an undefined ``path``, and the function returned an
    # undefined ``results``.
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
__lowerCamelCase : List[str] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class UpperCAmelCase ( _lowercase ):
    """TPU integration tests: run example scripts under ``xla_spawn`` with a
    patched ``sys.argv`` and check the resulting metrics and runtime.

    NOTE(review): machine-obfuscated — ``_lowercase`` is presumably
    ``TestCasePlus``, ``A__`` in ``patch.object(A__, "argv", A__)`` is
    presumably ``(sys, testargs)``, and every local is rebound to
    ``lowercase`` while later lines read ``tmp_dir``/``result``/``start``/
    ``end``. Confirm before use.
    """

    def UpperCAmelCase__ (self : Optional[int] ) -> int:
        # End-to-end: MRPC text-classification fine-tuning on 8 TPU cores.
        import xla_spawn

        lowercase = self.get_auto_remove_tmp_dir()
        # Full CLI for xla_spawn + run_glue; .split() turns it into an argv list.
        lowercase = f'\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
        with patch.object(A__ , "argv" , A__ ):
            lowercase = time()
            xla_spawn.main()
            lowercase = time()
            lowercase = get_results(A__ )
            self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5 )
            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start , 5_0_0 )

    def UpperCAmelCase__ (self : Any ) -> Optional[int]:
        # Smoke test: run the trainer TPU test module itself through xla_spawn.
        import xla_spawn

        lowercase = "\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
        with patch.object(A__ , "argv" , A__ ):
            xla_spawn.main()
| 310 |
'''simple docstring'''
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def UpperCAmelCase_(z):
    """Logistic sigmoid ``1 / (1 + e^-z)``; elementwise for numpy arrays.

    Fix vs. original: the body read ``z`` while the parameter had been
    renamed to ``lowerCAmelCase_``, a guaranteed NameError.
    """
    return 1 / (1 + np.exp(-z))
def UpperCAmelCase_(h, y):
    """Mean binary cross-entropy between predictions ``h`` (in (0, 1)) and
    labels ``y`` (0/1), both numpy arrays of the same shape.

    Fix vs. original: both parameters were named ``lowerCAmelCase_`` (a
    duplicate-argument SyntaxError) while the body read ``h`` and ``y``.
    """
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
def UpperCAmelCase_(x, y, weights):
    """Log-likelihood of the logistic model with the given weights.

    Computes ``sum(y * (x @ w) - log(1 + exp(x @ w)))``.

    Fix vs. original: all three parameters were named ``lowerCAmelCase_`` (a
    duplicate-argument SyntaxError) and the dot product was taken of one
    mangled name with itself instead of ``x`` with the weights.
    """
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))
def UpperCAmelCase_(alpha, x, y, max_iterations=7_0000):
    """Fit logistic-regression weights by batch gradient descent.

    Args:
        alpha: learning rate.
        x: feature matrix, shape (n_samples, n_features).
        y: 0/1 label vector, shape (n_samples,).
        max_iterations: number of gradient steps (the call site below passes
            this by keyword, so the name is part of the interface).

    Returns:
        The fitted weight vector ``theta`` of shape (n_features,).
    """
    # Fix vs. original: every intermediate (z, h, gradient, theta, j) was
    # rebound to one mangled name ``lowercase``, so the update step read
    # undefined names; the coherent locals are restored here.
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f'loss: {j} \t')  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    # Fit a 2-feature logistic regression on the iris dataset (class 0 vs.
    # the rest) and plot the decision boundary.
    # Fixes vs. original: the min/max and meshgrid tuples were all unpacked
    # into a single mangled name (losing x1/x2), and predict_prob computed
    # np.dot of its argument with itself instead of with the fitted theta.
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        """Predicted probability of class 1 for inputs ``x`` under the fitted model."""
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    # 0.5 contour = decision boundary.
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
| 310 | 1 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {"vocab_file": "vocab.txt"}
UpperCAmelCase = {
"vocab_file": {
"openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
},
}
UpperCAmelCase = {
"openbmb/cpm-ant-10b": 1_024,
}
def A ( A_ : Union[str, Any] ):
    """Load a vocabulary file into an OrderedDict mapping token -> index.

    Args:
        A_: path to a UTF-8 vocab file with one token per line.

    Returns:
        collections.OrderedDict mapping each token (newline-stripped) to its
        0-based line index.
    """
    # Fix: the original discarded every result into the throwaway `snake_case`
    # local, enumerated the *path string* instead of the file lines, and
    # returned the undefined free name `vocab`.
    vocab = collections.OrderedDict()
    with open(A_ , 'r' , encoding='utf-8' ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        vocab[token.rstrip('\n' )] = index
    return vocab
class a ( __magic_name__ ):
    """Greedy longest-match-first wordpiece tokenizer over a fixed vocab."""

    def __init__( self : Union[str, Any], vocab : Any, unk_token : List[str]="<unk>", max_input_chars_per_word : Dict=2_00 ):
        # Fix: the original declared all three parameters under one mangled
        # name (a SyntaxError) and never stored them on the instance.
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def __snake_case ( self : Dict, SCREAMING_SNAKE_CASE_ : Tuple ):
        """Split a token into the longest vocab substrings, left to right.

        Returns [self.unk_token] for over-long tokens; unknown characters are
        replaced by the unk token one character at a time.
        """
        chars = list(SCREAMING_SNAKE_CASE_ )
        if len(chars ) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars ):
            end = len(chars )
            cur_substr = None
            # Shrink the candidate window from the right until it is in vocab.
            while start < end:
                substr = ''.join(chars[start:end] )
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token )
                start += 1
            else:
                sub_tokens.append(cur_substr )
                start = end
        return sub_tokens
class a ( __magic_name__ ):
    """CPM-Ant-style tokenizer: jieba pre-segmentation + wordpiece sub-tokenization.

    NOTE(review): the base class name and the five `_snake_case` class
    attributes are mangled (they shadow each other, so only the last survives);
    presumably vocab-file names / pretrained maps — confirm against the
    constants defined above.
    """
    _snake_case = VOCAB_FILES_NAMES
    _snake_case = PRETRAINED_VOCAB_FILES_MAP
    _snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _snake_case = ['''input_ids''', '''attention_mask''']
    _snake_case = False
    def __init__( self : Optional[Any], SCREAMING_SNAKE_CASE_ : Optional[int], SCREAMING_SNAKE_CASE_ : Optional[Any]="<d>", SCREAMING_SNAKE_CASE_ : int="</d>", SCREAMING_SNAKE_CASE_ : Tuple="<s>", SCREAMING_SNAKE_CASE_ : Optional[Any]="</s>", SCREAMING_SNAKE_CASE_ : List[Any]="<pad>", SCREAMING_SNAKE_CASE_ : Optional[int]="<unk>", SCREAMING_SNAKE_CASE_ : Dict="</n>", SCREAMING_SNAKE_CASE_ : List[str]="</_>", SCREAMING_SNAKE_CASE_ : Optional[int]="left", **SCREAMING_SNAKE_CASE_ : Tuple, ):
        # NOTE(review): every parameter shares one mangled name — a SyntaxError
        # as written; only the keyword names passed to super() reveal intent.
        requires_backends(self, ['''jieba'''] )
        super().__init__(
            bod_token=SCREAMING_SNAKE_CASE_, eod_token=SCREAMING_SNAKE_CASE_, bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, line_token=SCREAMING_SNAKE_CASE_, space_token=SCREAMING_SNAKE_CASE_, padding_side=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
        snake_case : Optional[Any] = bod_token
        snake_case : int = eod_token
        snake_case : Dict = load_vocab(SCREAMING_SNAKE_CASE_ )
        # Space and newline get dedicated ids pulled out of the vocab proper.
        snake_case : Any = self.encoder[space_token]
        snake_case : List[str] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        snake_case : Tuple = collections.OrderedDict(sorted(self.encoder.items(), key=lambda SCREAMING_SNAKE_CASE_ : x[1] ) )
        snake_case : Optional[Any] = {v: k for k, v in self.encoder.items()}
        snake_case : Optional[Any] = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token )
    @property
    def __snake_case ( self : Union[str, Any] ):
        # id of the begin-of-document token
        return self.encoder[self.bod_token]
    @property
    def __snake_case ( self : str ):
        # id of the end-of-document token
        return self.encoder[self.eod_token]
    @property
    def __snake_case ( self : str ):
        # id of the newline token
        return self.encoder["\n"]
    @property
    def __snake_case ( self : Tuple ):
        # vocabulary size
        return len(self.encoder )
    def __snake_case ( self : List[Any] ):
        # full vocab including added tokens
        return dict(self.encoder, **self.added_tokens_encoder )
    def __snake_case ( self : List[str], SCREAMING_SNAKE_CASE_ : int ):
        # Tokenize: jieba word segmentation, then wordpiece on each segment.
        snake_case : Tuple = []
        for x in jieba.cut(SCREAMING_SNAKE_CASE_, cut_all=SCREAMING_SNAKE_CASE_ ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) )
        return output_tokens
    def __snake_case ( self : List[str], SCREAMING_SNAKE_CASE_ : Dict, **SCREAMING_SNAKE_CASE_ : Optional[int] ):
        # Decode after dropping negative ids and pad/eos/bos special ids.
        snake_case : List[Any] = [i for i in token_ids if i >= 0]
        snake_case : Any = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
    def __snake_case ( self : List[Any], SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
        # membership check against the raw encoder
        return token in self.encoder
    def __snake_case ( self : Optional[Any], SCREAMING_SNAKE_CASE_ : List[str] ):
        # join sub-tokens back into a string (no separator)
        return "".join(SCREAMING_SNAKE_CASE_ )
    def __snake_case ( self : Dict, SCREAMING_SNAKE_CASE_ : List[str] ):
        # token -> id, falling back to the unk id
        return self.encoder.get(SCREAMING_SNAKE_CASE_, self.encoder.get(self.unk_token ) )
    def __snake_case ( self : int, SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
        # id -> token, falling back to the unk token
        return self.decoder.get(SCREAMING_SNAKE_CASE_, self.unk_token )
    def __snake_case ( self : Optional[Any], SCREAMING_SNAKE_CASE_ : str, SCREAMING_SNAKE_CASE_ : Optional[str] = None ):
        # Save the vocabulary (with space/newline restored) to disk.
        if os.path.isdir(SCREAMING_SNAKE_CASE_ ):
            snake_case : Tuple = os.path.join(
                SCREAMING_SNAKE_CASE_, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        else:
            snake_case : List[str] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
        snake_case : Dict = 0
        if " " in self.encoder:
            snake_case : Optional[int] = self.encoder[''' ''']
            del self.encoder[" "]
        if "\n" in self.encoder:
            snake_case : Union[str, Any] = self.encoder['''\n''']
            del self.encoder["\n"]
        snake_case : Tuple = collections.OrderedDict(sorted(self.encoder.items(), key=lambda SCREAMING_SNAKE_CASE_ : x[1] ) )
        # NOTE(review): `index` below is never initialized (the `= 0` above was
        # assigned to a throwaway local by the obfuscation) — NameError as-is.
        with open(SCREAMING_SNAKE_CASE_, '''w''', encoding='''utf-8''' ) as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ''' Please check that the vocabulary is not corrupted!''' )
                    snake_case : Any = token_index
                writer.write(token + '''\n''' )
                index += 1
        return (vocab_file,)
    def __snake_case ( self : Union[str, Any], SCREAMING_SNAKE_CASE_ : List[int], SCREAMING_SNAKE_CASE_ : List[int] = None ):
        # Prepend BOS; for a pair, separate the two sequences with BOS as well.
        if token_ids_a is None:
            return [self.bos_token_id] + token_ids_a
        return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
    def __snake_case ( self : Optional[Any], SCREAMING_SNAKE_CASE_ : List[int], SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None, SCREAMING_SNAKE_CASE_ : bool = False ):
        # Special-tokens mask: 1 for the BOS markers inserted above, else 0.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=SCREAMING_SNAKE_CASE_, token_ids_a=SCREAMING_SNAKE_CASE_, already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
        if token_ids_a is not None:
            return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE_ ))
        return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ ))
| 702 |
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def A ( A_ : str ):
    """Pytest fixture: reset datasets' emitted-deprecation-warnings registry.

    Args:
        A_: the pytest ``monkeypatch`` fixture.
    """
    # Fix: the original body called `monkeypatch.setattr(...)` but the fixture
    # parameter had been renamed to A_, so `monkeypatch` was an undefined name.
    A_.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''' , set() )
@pytest.fixture
def A ( A_ : Optional[Any] ):
    """Pytest fixture: replace datasets' huggingface_hub handle with a stub.

    Args:
        A_: the pytest ``monkeypatch`` fixture.
    """
    # Fix: the original defined both helper classes under the name `a`, stored
    # the metric id into a throwaway local, built the list from the undefined
    # name `MetricMock`, returned the undefined attribute `_metrics`, and
    # called the undefined names `monkeypatch` / `HfhMock`.
    class MetricMock:
        def __init__( self : List[str], metric_id : Optional[Any] ):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id ) for metric_id in ['''accuracy''', '''mse''', '''precision''', '''codeparrot/apps_metric''']]

        def list_metrics( self : str ):
            return self._metrics

    A_.setattr('''datasets.inspect.huggingface_hub''' , HfhMock() )
@pytest.mark.parametrize(
    '''func, args''' , [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] )
def A ( func , args , mock_emitted_deprecation_warnings , mock_hfh , tmp_path ):
    """Each deprecated metric entry point warns and points to `evaluate`.

    Args:
        func/args: the parametrized callable and its arguments.
        mock_emitted_deprecation_warnings, mock_hfh: fixtures defined above.
        tmp_path: pytest tmp directory, substituted into args when requested.
    """
    # Fix: the original declared five parameters under two mangled names
    # (duplicate-argument SyntaxError) and read `args`/`func`/`tmp_path` as
    # undefined free names; FutureWarning restores the mangled warns() class.
    if "tmp_path" in args:
        args = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args )
    with pytest.warns(FutureWarning , match='''https://huggingface.co/docs/evaluate''' ):
        func(*args )
| 555 | 0 |
"""simple docstring"""
import os
def A_ ( ):
    """Return the first ten digits of the sum of the numbers in num.txt.

    Reads ``num.txt`` from the directory containing this module, one integer
    per line (Project Euler problem 13).
    """
    # Fix: the original joined the dirname of the undefined name `snake_case_`
    # (pre-obfuscation: __file__), discarded the result, opened the undefined
    # name again, and converted it — not each line — to int.
    file_path : int = os.path.join(os.path.dirname(__file__ ) ,"""num.txt""" )
    with open(file_path ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:1_0]
if __name__ == "__main__":
    # Fix: the original called the undefined name `solution()`; the solver
    # above was renamed to A_ by the obfuscation.
    print(A_())
| 499 |
"""simple docstring"""
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
__A : int = logging.get_logger(__name__)
__A : Optional[Any] = R'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class lowerCamelCase ( _UpperCAmelCase ):
    # Abstract stopping-criterion base: subclasses decide when generation stops.
    # NOTE(review): the decorator argument is mangled — presumably the shared
    # inputs docstring constant defined above; confirm.
    @add_start_docstrings(SCREAMING_SNAKE_CASE_ )
    def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
        raise NotImplementedError("""StoppingCriteria needs to be subclassed""" )
class lowerCamelCase ( _UpperCAmelCase ):
    # Max-length criterion: stop once the sequence reaches max_length; warn if
    # the model's position-embedding budget is about to be exceeded.
    # NOTE(review): every parameter below shares one mangled name — a
    # duplicate-argument SyntaxError as written; the bodies reveal the intended
    # names (max_length, max_position_embeddings; input_ids, scores).
    def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
        UpperCamelCase : str = max_length
        UpperCamelCase : List[Any] = max_position_embeddings
    @add_start_docstrings(SCREAMING_SNAKE_CASE_ )
    def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
        UpperCamelCase : Dict = input_ids.shape[-1]
        UpperCamelCase : List[Any] = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                """This is a friendly reminder - the current text generation call will exceed the model's predefined """
                f'maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '
                """exceptions, performance degradation, or nothing at all.""" )
        return is_done
class lowerCamelCase ( _UpperCAmelCase ):
    # Deprecated max-new-tokens criterion: equivalent to a max-length criterion
    # with max_length = start_length + max_new_tokens (see the warning text).
    def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
        warnings.warn(
            """The class `MaxNewTokensCriteria` is deprecated. """
            f'Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '
            """with `max_length = start_length + max_new_tokens` instead.""" , SCREAMING_SNAKE_CASE_ , )
        # NOTE(review): assignments below target a throwaway local; the RHS
        # names (start_length, max_new_tokens) are the intended attributes.
        UpperCamelCase : Dict = start_length
        UpperCamelCase : List[Any] = max_new_tokens
        UpperCamelCase : int = start_length + max_new_tokens
    @add_start_docstrings(SCREAMING_SNAKE_CASE_ )
    def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
        return input_ids.shape[-1] >= self.max_length
class lowerCamelCase ( _UpperCAmelCase ):
    # Max-time criterion: stop once wall-clock time since the initial
    # timestamp exceeds max_time seconds.
    def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
        UpperCamelCase : List[str] = max_time
        UpperCamelCase : Optional[int] = time.time() if initial_timestamp is None else initial_timestamp
    @add_start_docstrings(SCREAMING_SNAKE_CASE_ )
    def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
        return time.time() - self.initial_timestamp > self.max_time
class lowerCamelCase ( _UpperCAmelCase ):
    # Criteria list: generation stops when ANY contained criterion fires.
    @add_start_docstrings(SCREAMING_SNAKE_CASE_ )
    def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
        return any(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for criteria in self )
    @property
    def a_ ( self ):
        # Return the max_length of the first length-based criterion, if any.
        # NOTE(review): both isinstance checks are mangled to test an object
        # against itself; presumably MaxLengthCriteria / MaxNewTokensCriteria.
        for stopping_criterium in self:
            if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
                return stopping_criterium.max_length
            elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
                return stopping_criterium.max_length
        return None
def A_ ( stopping_criteria : StoppingCriteriaList ,max_length : int ):
    """Return a copy of the criteria list guaranteed to enforce max_length.

    Args:
        stopping_criteria: the user-provided criteria list.
        max_length: the max_length passed to generate().

    Returns:
        A deep copy of the list; a MaxLengthCriteria is appended when the list
        had none, and a warning is emitted when the two lengths disagree.
    """
    # Fix: the original declared both parameters under one mangled name
    # (a SyntaxError) and read `stopping_criteria` / `max_length` /
    # `new_stopping_criteria` as undefined free names.
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria )
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("""You set different `max_length` for stopping criteria and `max_length` parameter""" ,UserWarning )
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length ) )
    return new_stopping_criteria
| 499 | 1 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger()
@dataclass
class _a :
    """Records leaf modules (no submodules, or Conv2d/BatchNorm2d) reached in a forward pass.

    NOTE(review): the three class attributes all mangle to the same names and
    shadow each other; presumably module / traced / handles fields — confirm.
    """
    A_ = 4_2
    A_ = field(default_factory=__a )
    A_ = field(default_factory=__a )
    def lowerCamelCase__ ( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Tensor , lowercase_ : Tensor ):
        '''Forward hook: append leaf modules to self.traced.

        NOTE(review): the condition result is assigned to a throwaway local and
        then read back via the undefined free name `has_not_submodules`.
        '''
        lowercase_ = len(list(m.modules() ) ) == 1 or isinstance(lowercase_ , nn.Convad ) or isinstance(lowercase_ , nn.BatchNormad )
        if has_not_submodules:
            self.traced.append(lowercase_ )
    def __call__( self : int , lowercase_ : Tensor ):
        '''Register hooks on every submodule, run one forward pass, then detach the hooks.'''
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(lowercase_ )
        [x.remove() for x in self.handles]
        return self
    @property
    def lowerCamelCase__ ( self : Optional[int] ):
        '''Traced modules that actually carry parameters (non-empty state_dict).'''
        return list(filter(lambda lowercase_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class _a :
    """Copies weights from a source module to a destination module by tracing
    both with the Tracker above and zipping their parametrized leaves.
    """
    A_ = 4_2
    A_ = 4_2
    A_ = 0
    A_ = field(default_factory=__a )
    A_ = field(default_factory=__a )
    def __call__( self : str , lowercase_ : Tensor ):
        '''Trace dest and src on the same input and load src weights into dest, pairwise.'''
        lowercase_ = Tracker(self.dest )(lowercase_ ).parametrized
        lowercase_ = Tracker(self.src )(lowercase_ ).parametrized
        # Drop module types listed in the skip sets before pairing.
        lowercase_ = list(filter(lambda lowercase_ : type(lowercase_ ) not in self.src_skip , lowercase_ ) )
        lowercase_ = list(filter(lambda lowercase_ : type(lowercase_ ) not in self.dest_skip , lowercase_ ) )
        if len(lowercase_ ) != len(lowercase_ ):
            raise Exception(
                F"""Numbers of operations are different. Source module has {len(lowercase_ )} operations while"""
                F""" destination module has {len(lowercase_ )}.""" )
        for dest_m, src_m in zip(lowercase_ , lowercase_ ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(F"""Transfered from={src_m} to={dest_m}""" )
def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True ) ->Optional[Any]:
    # Convert one timm ResNet checkpoint to a transformers ResNet and verify
    # logits match, optionally pushing model + processor to the Hub.
    # NOTE(review): all four parameters share one mangled name (a duplicate-
    # argument SyntaxError) and the body reads pre-obfuscation names (`name`,
    # `from_model`, `our_model`, `push_to_hub`, `save_directory`,
    # `checkpoint_name`, `image_processor`) that are undefined as written.
    print(f"""Converting {name}...""" )
    with torch.no_grad():
        lowercase_ = timm.create_model(SCREAMING_SNAKE_CASE_ , pretrained=SCREAMING_SNAKE_CASE_ ).eval()
        lowercase_ = ResNetForImageClassification(SCREAMING_SNAKE_CASE_ ).eval()
        lowercase_ = ModuleTransfer(src=SCREAMING_SNAKE_CASE_ , dest=SCREAMING_SNAKE_CASE_ )
        lowercase_ = torch.randn((1, 3, 2_24, 2_24) )
        module_transfer(SCREAMING_SNAKE_CASE_ )
    assert torch.allclose(from_model(SCREAMING_SNAKE_CASE_ ) , our_model(SCREAMING_SNAKE_CASE_ ).logits ), "The model logits don't match the original one."
    lowercase_ = f"""resnet{"-".join(name.split("resnet" ) )}"""
    print(SCREAMING_SNAKE_CASE_ )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add model""" , use_temp_dir=SCREAMING_SNAKE_CASE_ , )
        # we can use the convnext one
        lowercase_ = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add image processor""" , use_temp_dir=SCREAMING_SNAKE_CASE_ , )
        print(f"""Pushed {checkpoint_name}""" )
def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = True ) ->str:
    # Build ImageNet-1k ResNet configs (18..152) and convert either one named
    # checkpoint or all of them via the converter above.
    # NOTE(review): parameters share one mangled name (duplicate-argument
    # SyntaxError) and `model_name` / `names_to_config` semantics are read
    # from undefined free names as written.
    lowercase_ = """imagenet-1k-id2label.json"""
    lowercase_ = 10_00
    lowercase_ = (1, num_labels)
    lowercase_ = """huggingface/label-files"""
    lowercase_ = num_labels
    lowercase_ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="""dataset""" ) , """r""" ) )
    lowercase_ = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
    lowercase_ = idalabel
    lowercase_ = {v: k for k, v in idalabel.items()}
    # partial() pre-binds the label maps so each config line stays short
    lowercase_ = partial(SCREAMING_SNAKE_CASE_ , num_labels=SCREAMING_SNAKE_CASE_ , idalabel=SCREAMING_SNAKE_CASE_ , labelaid=SCREAMING_SNAKE_CASE_ )
    lowercase_ = {
        """resnet18""": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type="""basic""" ),
        """resnet26""": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="""bottleneck""" ),
        """resnet34""": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type="""basic""" ),
        """resnet50""": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="""bottleneck""" ),
        """resnet101""": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="""bottleneck""" ),
        """resnet152""": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="""bottleneck""" ),
    }
    if model_name:
        convert_weight_and_push(SCREAMING_SNAKE_CASE_ , names_to_config[model_name] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    return config, expected_shape
# CLI entry point: parse arguments, create the dump folder, run the conversion.
if __name__ == "__main__":
    __snake_case = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default=None,
        type=str,
        help=(
            """The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
            """ currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default=None,
        type=Path,
        required=True,
        help="""Path to the output PyTorch model directory.""",
    )
    parser.add_argument(
        """--push_to_hub""",
        default=True,
        type=bool,
        required=False,
        help="""If True, push model and image processor to the hub.""",
    )
    # NOTE(review): the parser is stored in `__snake_case` but read back via the
    # undefined name `parser` — NameError as written.
    __snake_case = parser.parse_args()
    __snake_case = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 711 | '''simple docstring'''
import argparse
__snake_case = """docs/source/_static/js/custom.js"""
def A_ ( SCREAMING_SNAKE_CASE_ ) ->Any:
    """Update the docs custom.js: bump the stable version and register the new
    version in the version-mapping dictionary.

    Args:
        SCREAMING_SNAKE_CASE_: the release version string (e.g. "4.30.0").
    """
    # Fix: the original opened the version *string* as a file, discarded the
    # read lines, and read `lines` / `index` / `version` as undefined free
    # names; the path comes from the module-level constant above.
    version = SCREAMING_SNAKE_CASE_
    with open(__snake_case , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith("""const stableVersion =""" ):
        index += 1
    lines[index] = f"""const stableVersion = \"v{version}\"\n"""
    # Then update the dictionary
    while not lines[index].startswith("""const versionMapping = {""" ):
        index += 1
    # We go until the end
    while not lines[index].startswith("""}""" ):
        index += 1
    # We add the new version at the end
    lines[index - 1] += f"""    \"v{version}\": \"v{version}\",\n"""
    with open(__snake_case , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        f.writelines(lines )
# CLI entry point: read --version and update the docs custom.js file.
if __name__ == "__main__":
    __snake_case = argparse.ArgumentParser()
    parser.add_argument("""--version""", help="""Release version.""")
    # NOTE(review): the parser is stored in `__snake_case` but read back via the
    # undefined name `parser` — NameError as written.
    __snake_case = parser.parse_args()
    update_custom_js(args.version)
| 603 | 0 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class lowerCAmelCase_ ( unittest.TestCase ):
    """Pipeline tests for video classification (VideoMAE tiny checkpoint)."""
    UpperCAmelCase = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
    def UpperCamelCase_ ( self : Tuple , _A : str , _A : List[Any] , _A : List[Any] ):
        # Build the pipeline under test plus a local file and a URL example.
        _UpperCamelCase = hf_hub_download(
            repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
        _UpperCamelCase = VideoClassificationPipeline(model=_A , image_processor=_A , top_k=2 )
        _UpperCamelCase = [
            example_video_filepath,
            '''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
        ]
        return video_classifier, examples
    def UpperCamelCase_ ( self : Tuple , _A : List[Any] , _A : Union[str, Any] ):
        # Each example must yield exactly two {score, label} dicts (top_k=2).
        for example in examples:
            _UpperCamelCase = video_classifier(_A )
            self.assertEqual(
                _A , [
                    {'''score''': ANY(_A ), '''label''': ANY(_A )},
                    {'''score''': ANY(_A ), '''label''': ANY(_A )},
                ] , )
    @require_torch
    def UpperCamelCase_ ( self : Optional[Any] ):
        # End-to-end check against a tiny random VideoMAE model with fixed
        # expected scores, for single and batched inputs.
        _UpperCamelCase = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
        _UpperCamelCase = VideoMAEFeatureExtractor(
            size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} )
        _UpperCamelCase = pipeline(
            '''video-classification''' , model=_A , feature_extractor=_A , frame_sampling_rate=4 )
        _UpperCamelCase = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
        _UpperCamelCase = video_classifier(_A , top_k=2 )
        self.assertEqual(
            nested_simplify(_A , decimals=4 ) , [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}] , )
        _UpperCamelCase = video_classifier(
            [
                video_file_path,
                video_file_path,
            ] , top_k=2 , )
        self.assertEqual(
            nested_simplify(_A , decimals=4 ) , [
                [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
                [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
            ] , )
    @require_tf
    def UpperCamelCase_ ( self : Optional[int] ):
        # TensorFlow variant not implemented yet.
        pass
| 10 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class __UpperCamelCase (unittest.TestCase ):
    """Model tester: builds small BertConfig instances and random inputs for
    the Flax BERT test-suite below."""
    def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=32 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=37 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=4 , ) -> List[Any]:
        '''Store all hyper-parameters; the RHS names are the intended config fields.

        NOTE(review): every parameter shares one mangled name (a duplicate-
        argument SyntaxError as written); only the RHS names are meaningful.
        '''
        lowercase = parent
        lowercase = batch_size
        lowercase = seq_length
        lowercase = is_training
        lowercase = use_attention_mask
        lowercase = use_token_type_ids
        lowercase = use_labels
        lowercase = vocab_size
        lowercase = hidden_size
        lowercase = num_hidden_layers
        lowercase = num_attention_heads
        lowercase = intermediate_size
        lowercase = hidden_act
        lowercase = hidden_dropout_prob
        lowercase = attention_probs_dropout_prob
        lowercase = max_position_embeddings
        lowercase = type_vocab_size
        lowercase = type_sequence_label_size
        lowercase = initializer_range
        lowercase = num_choices
    def _a ( self ) -> str:
        '''Build (config, input_ids, token_type_ids, attention_mask) with random tensors.'''
        lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowercase = None
        if self.use_attention_mask:
            lowercase = random_attention_mask([self.batch_size, self.seq_length] )
        lowercase = None
        if self.use_token_type_ids:
            lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowercase = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def _a ( self ) -> Optional[int]:
        '''Same as above, packaged as (config, inputs_dict) for common tests.'''
        lowercase = self.prepare_config_and_inputs()
        lowercase , lowercase , lowercase , lowercase = config_and_inputs
        lowercase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
    def _a ( self ) -> Any:
        '''Decoder variant: also returns encoder hidden states and mask.'''
        lowercase = self.prepare_config_and_inputs()
        lowercase , lowercase , lowercase , lowercase = config_and_inputs
        lowercase = True
        lowercase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        lowercase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class __UpperCamelCase (_UpperCAmelCase , unittest.TestCase ):
    """Flax BERT model tests, parameterized over all Flax BERT head classes."""
    __A = True
    __A = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def _a ( self ) -> Any:
        # NOTE(review): FlaxBertModelTester is not defined in this file (the
        # tester class above was renamed by the obfuscation) — NameError as-is.
        lowercase = FlaxBertModelTester(self )
    @slow
    def _a ( self ) -> List[Any]:
        # Smoke test: load the pretrained checkpoint and run a 1x1 input.
        lowercase = FlaxBertModel.from_pretrained("""bert-base-cased""" )
        lowercase = model(np.ones((1, 1) ) )
        self.assertIsNotNone(_lowerCAmelCase )
| 588 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : str = logging.get_logger(__name__)
class _UpperCamelCase ( PretrainedConfig ):
    '''Configuration for wrapping a timm model as a transformers backbone.

    Fixes applied: the original base class was the undefined mangled name
    `__lowerCAmelCase` (PretrainedConfig is imported above), all five __init__
    parameters shared the name `__a` (a duplicate-argument SyntaxError), and
    every attribute assignment was discarded into a throwaway local.
    '''
    # NOTE(review): mangled class-attribute name kept for compatibility;
    # presumably this was `model_type` — confirm.
    __UpperCAmelCase : Optional[Any] ="timm_backbone"
    def __init__( self , backbone=None , num_channels=3 , features_only=True , use_pretrained_backbone=True , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        self.backbone = backbone                              # timm model name
        self.num_channels = num_channels                      # input channels
        self.features_only = features_only                    # feature-extraction mode
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        # Default to the last stage when no explicit out indices are given.
        self.out_indices = out_indices if out_indices is not None else (-1,)
| 718 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class _UpperCamelCase :
    '''Tracks the peak RSS of the current process on a background thread.

    Fixes applied: the original defined all four methods under the single
    name `snake_case` (so only the last survived — the callers below use
    .start()/.stop()) and stored every attribute into a throwaway local
    instead of on the instance.
    '''
    def __init__( self ):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor( self ):
        # Busy-poll the process RSS and keep the maximum seen.
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss , self.cpu_memory_peak )
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start( self ):
        # Launch the monitor as a daemon thread so it never blocks shutdown.
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor )
        self.thread.daemon = True
        self.thread.start()

    def stop( self ):
        # Signal the monitor loop to exit and return the recorded peak RSS.
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
A : Any = PeakCPUMemory()
def _lowerCamelCase ( ):
    '''Snapshot time, CPU RSS, and per-GPU allocated memory; start peak tracking.

    Returns:
        dict with keys "time", "cpu", and one str(i) entry per CUDA device.
    '''
    # Fix: the original discarded every measurement into a throwaway local,
    # passed the class name instead of the device index to memory_allocated,
    # and returned the undefined free name `measures`.
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = torch.cuda.memory_allocated(i )
    torch.cuda.reset_peak_memory_stats()
    return measures
def _lowerCamelCase ( _UpperCamelCase ):
    '''Compute deltas (in MiB, and seconds for time) against a start snapshot.

    Args:
        _UpperCamelCase: the dict returned by the start-measure function above.

    Returns:
        dict with "time", "cpu", "cpu-peak", and per-GPU "i" / "i-peak" deltas.
    '''
    # Fix: the original read the snapshot through the undefined free name
    # `start_measures`, discarded every delta into a throwaway local, and
    # returned the undefined name `measures`.
    measures = {"time": time.time() - _UpperCamelCase["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - _UpperCamelCase["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - _UpperCamelCase["cpu"]) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = (torch.cuda.memory_allocated(i ) - _UpperCamelCase[str(i )]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i ) - _UpperCamelCase[str(i )]) / 2**20
    return measures
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
print(f"{description}:" )
print(f"- Time: {measures['time']:.2f}s" )
for i in range(torch.cuda.device_count() ):
print(f"- GPU {i} allocated: {measures[str(_UpperCamelCase )]:.2f}MiB" )
__lowerCAmelCase = measures[f"{i}-peak"]
print(f"- GPU {i} peak: {peak:.2f}MiB" )
print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB" )
print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB" )
| 282 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Any = logging.get_logger(__name__)
A_ : Tuple = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class _a (PretrainedConfig ):
    '''Configuration class for REALM models (embedder/encoder/scorer/reader).

    Fixes applied: the original base class was the undefined mangled name
    `__magic_name__` (PretrainedConfig is imported above), all 25 __init__
    parameters shared the name `A__` (a duplicate-argument SyntaxError), and
    every attribute assignment was discarded into a throwaway local. The
    parameter names were restored from the RHS of the original body, matched
    positionally against the original default values.
    '''
    UpperCAmelCase__: Union[str, Any] = '''realm'''
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , retriever_proj_size=128 , num_hidden_layers=12 , num_attention_heads=12 , num_candidates=8 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , span_hidden_size=256 , max_span_width=10 , reader_layer_norm_eps=1e-3 , reader_beam_size=5 , reader_seq_len=320 , num_block_records=1335_3718 , searcher_beam_size=5000 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 456 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _a (unittest.TestCase ):
    """Smoke test: downloading a Flax pipeline must fetch only Flax weight files.

    NOTE(review): obfuscation bound every result below to ``A__`` while later
    lines read ``all_root_files`` and ``files`` — those names are never
    defined, so this test cannot run as written; restore the intended local
    names before relying on it.
    """

    def __A ( self ):
        # Download into a throwaway cache so we can inspect exactly what was fetched.
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            A__ : Optional[int] = FlaxDiffusionPipeline.from_pretrained(
                """hf-internal-testing/tiny-stable-diffusion-pipe""" , safety_checker=A__ , cache_dir=A__ )
            A__ : int = [t[-1] for t in os.walk(os.path.join(A__ , os.listdir(A__ )[0] , """snapshots""" ) )]
            A__ : str = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(""".bin""" ) for f in files )
@slow
@require_flax
class _a (unittest.TestCase ):
    """Slow integration tests for ``FlaxStableDiffusionPipeline``.

    NOTE(review): obfuscation bound every intermediate result to ``A__`` while
    the assertions read the intended names (``pipeline``, ``prompt``,
    ``num_samples``, ``images``, ``images_eff``, ``slice``, ``slice_eff``,
    ``scheduler_state`` ...).  None of those names are defined, so these tests
    cannot run as written; the intended bindings must be restored from the
    upstream diffusers test file.  All classes in this fragment also share the
    obfuscated name ``_a``, and all six methods share the name ``__A`` — only
    the last of each survives at runtime.
    """

    def __A ( self ):
        # Tiny checkpoint, 64x64 output: checks sharding/jit plumbing end to end.
        A__ , A__ : Any = FlaxStableDiffusionPipeline.from_pretrained(
            """hf-internal-testing/tiny-stable-diffusion-pipe""" , safety_checker=A__ )
        A__ : Optional[int] = (
            """A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
            """ field, close up, split lighting, cinematic"""
        )
        A__ : Tuple = jax.random.PRNGKey(0 )
        A__ : int = 4
        A__ : Optional[Any] = jax.device_count()
        A__ : Union[str, Any] = num_samples * [prompt]
        A__ : str = pipeline.prepare_inputs(A__ )
        # shard inputs and rng
        A__ : str = replicate(A__ )
        A__ : Any = jax.random.split(A__ , A__ )
        A__ : Optional[Any] = shard(A__ )
        A__ : int = pipeline(A__ , A__ , A__ , A__ , jit=A__ ).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            # Reference values recorded on a TPU v2-8 / 8-device host.
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_5_1_4_7_4_5 ) < 1e-3
            assert np.abs(np.abs(A__ , dtype=np.floataa ).sum() - 4_9_9_4_7.8_7_5 ) < 5e-1
        A__ : str = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
        assert len(A__ ) == num_samples

    def __A ( self ):
        # Full SD v1-4 checkpoint, fp32 "flax" revision.
        A__ , A__ : Any = FlaxStableDiffusionPipeline.from_pretrained(
            """CompVis/stable-diffusion-v1-4""" , revision="""flax""" , safety_checker=A__ )
        A__ : Tuple = (
            """A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
            """ field, close up, split lighting, cinematic"""
        )
        A__ : Tuple = jax.random.PRNGKey(0 )
        A__ : Optional[Any] = 50
        A__ : List[str] = jax.device_count()
        A__ : Dict = num_samples * [prompt]
        A__ : Union[str, Any] = pipeline.prepare_inputs(A__ )
        # shard inputs and rng
        A__ : int = replicate(A__ )
        A__ : List[str] = jax.random.split(A__ , A__ )
        A__ : Optional[Any] = shard(A__ )
        A__ : Optional[Any] = pipeline(A__ , A__ , A__ , A__ , jit=A__ ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_5_6_5_2_4_0_1) ) < 1e-3
            assert np.abs((np.abs(A__ , dtype=np.floataa ).sum() - 2_3_8_3_8_0_8.2) ) < 5e-1

    def __A ( self ):
        # bf16 revision with the safety checker disabled.
        A__ , A__ : int = FlaxStableDiffusionPipeline.from_pretrained(
            """CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=A__ )
        A__ : Any = (
            """A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
            """ field, close up, split lighting, cinematic"""
        )
        A__ : Dict = jax.random.PRNGKey(0 )
        A__ : str = 50
        A__ : Any = jax.device_count()
        A__ : List[Any] = num_samples * [prompt]
        A__ : Any = pipeline.prepare_inputs(A__ )
        # shard inputs and rng
        A__ : Any = replicate(A__ )
        A__ : str = jax.random.split(A__ , A__ )
        A__ : Optional[Any] = shard(A__ )
        A__ : Dict = pipeline(A__ , A__ , A__ , A__ , jit=A__ ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
            assert np.abs((np.abs(A__ , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1

    def __A ( self ):
        # Same as above but with the default (enabled) safety checker.
        A__ , A__ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
            """CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa )
        A__ : List[str] = (
            """A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
            """ field, close up, split lighting, cinematic"""
        )
        A__ : int = jax.random.PRNGKey(0 )
        A__ : Optional[int] = 50
        A__ : Optional[Any] = jax.device_count()
        A__ : Optional[int] = num_samples * [prompt]
        A__ : Optional[int] = pipeline.prepare_inputs(A__ )
        # shard inputs and rng
        A__ : Optional[int] = replicate(A__ )
        A__ : Tuple = jax.random.split(A__ , A__ )
        A__ : Optional[int] = shard(A__ )
        A__ : Optional[int] = pipeline(A__ , A__ , A__ , A__ , jit=A__ ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
            assert np.abs((np.abs(A__ , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1

    def __A ( self ):
        # Swap in a DDIM scheduler and pass its state explicitly.
        A__ : int = FlaxDDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , set_alpha_to_one=A__ , steps_offset=1 , )
        A__ , A__ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
            """CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , scheduler=A__ , safety_checker=A__ , )
        A__ : Union[str, Any] = scheduler.create_state()
        A__ : Optional[Any] = scheduler_state
        A__ : Union[str, Any] = (
            """A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
            """ field, close up, split lighting, cinematic"""
        )
        A__ : Tuple = jax.random.PRNGKey(0 )
        A__ : List[Any] = 50
        A__ : Optional[int] = jax.device_count()
        A__ : Dict = num_samples * [prompt]
        A__ : List[Any] = pipeline.prepare_inputs(A__ )
        # shard inputs and rng
        A__ : Tuple = replicate(A__ )
        A__ : Dict = jax.random.split(A__ , A__ )
        A__ : Dict = shard(A__ )
        A__ : int = pipeline(A__ , A__ , A__ , A__ , jit=A__ ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_5_0_4_3_9_4_5) ) < 1e-3
            assert np.abs((np.abs(A__ , dtype=np.floataa ).sum() - 2_3_4_7_6_9_3.5) ) < 5e-1

    def __A ( self ):
        # Compare default attention against memory-efficient attention output.
        A__ : Optional[int] = (
            """A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
            """ field, close up, split lighting, cinematic"""
        )
        A__ : Optional[int] = jax.device_count()
        A__ : str = num_samples * [prompt]
        A__ : Any = jax.random.split(jax.random.PRNGKey(0 ) , A__ )
        A__ , A__ : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
            """CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=A__ , )
        A__ : Tuple = replicate(A__ )
        A__ : Dict = pipeline.prepare_inputs(A__ )
        A__ : str = shard(A__ )
        A__ : Tuple = pipeline(A__ , A__ , A__ , jit=A__ ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        A__ : Optional[Any] = images[2, 0, 256, 10:17, 1]
        # With memory efficient attention
        A__ , A__ : Any = FlaxStableDiffusionPipeline.from_pretrained(
            """CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=A__ , use_memory_efficient_attention=A__ , )
        A__ : Any = replicate(A__ )
        A__ : Optional[int] = pipeline.prepare_inputs(A__ )
        A__ : Dict = shard(A__ )
        A__ : Tuple = pipeline(A__ , A__ , A__ , jit=A__ ).images
        # NOTE(review): `images_eff`, `slice_eff` and `slice` below are never
        # assigned (obfuscation destroyed the bindings); `slice` would also
        # shadow the builtin if restored literally — rename when fixing.
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        A__ : Union[str, Any] = images[2, 0, 256, 10:17, 1]
        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice ).max() < 1e-2
| 456 | 1 |
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
# Keep the star-import surface minimal; the accelerate CLI imports the helper
# functions by name, not via `import *`.
__all__ = ["main"]


def test_command_parser(subparsers=None):
    """Build the argument parser for ``accelerate test``.

    The obfuscated original named all three functions ``lowerCAmelCase`` (so
    only the last survived) and read the undefined names ``subparsers`` and
    ``lowercase_``; the canonical names are restored here.

    Args:
        subparsers: optional subparsers object from the main ``accelerate``
            CLI; when ``None`` a standalone parser is created.

    Returns:
        The configured ``argparse.ArgumentParser``.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )
    if subparsers is not None:
        # When registered as a sub-command, dispatch straight to `test_command`.
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    """Run the bundled sanity-check script through ``accelerate-launch``.

    Args:
        args: parsed namespace from :func:`test_command_parser`.
    """
    # The test script lives two directories up from this file, under test_utils/scripts/.
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"
    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    """Standalone entry point: parse arguments and run the test command."""
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
| 707 |
"""simple docstring"""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
# The obfuscated original bound all five module constants to the single name
# `__UpperCAmelCase`, clobbering each other, while the LightningModule below
# reads `MODEL_MODES` and `arg_to_scheduler` — restore distinct names.
logger = logging.getLogger(__name__)

require_version('pytorch_lightning>=1.0.4')

# Map task mode -> Auto* class used to instantiate the backbone model.
MODEL_MODES = {
    'base': AutoModel,
    'sequence-classification': AutoModelForSequenceClassification,
    'question-answering': AutoModelForQuestionAnswering,
    'pretraining': AutoModelForPreTraining,
    'token-classification': AutoModelForTokenClassification,
    'language-modeling': AutoModelWithLMHead,
    'summarization': AutoModelForSeqaSeqLM,
    'translation': AutoModelForSeqaSeqLM,
}

# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    'linear': get_linear_schedule_with_warmup,
    'cosine': get_cosine_schedule_with_warmup,
    'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
    'polynomial': get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule, # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class __lowercase ( pl.LightningModule ):
    """Generic LightningModule wrapper around a HuggingFace transformer.

    NOTE(review): this block is heavily damaged by obfuscation and cannot run
    as written: ``__init__`` and several methods declare multiple parameters
    with the same name ``A`` (a SyntaxError); every ``self.<attr> = ...``
    assignment was rewritten to a throwaway local ``UpperCAmelCase__`` while
    later code reads the intended attributes (``self.step_count``,
    ``self.output_dir``, ``self.config``, ``self.tokenizer``, ``self.model``,
    ``self.model_type``, ``self.opt``, ``self.train_loader``,
    ``self.dataset_size``); all methods share the name ``__lowercase`` so only
    the last survives.  Restore from the upstream `lightning_base.py` before
    use — the comments below document the evident intent only.
    """

    def __init__( self : List[str] ,A : argparse.Namespace ,A : List[Any]=None ,A : Any="base" ,A : List[str]=None ,A : Optional[Any]=None ,A : int=None ,**A : Union[str, Any] ,):
        """Build config, tokenizer and model from `hparams` unless given explicitly."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(A )
        UpperCAmelCase__ : Union[str, Any] = 0
        UpperCAmelCase__ : List[Any] = Path(self.hparams.output_dir )
        UpperCAmelCase__ : str = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            UpperCAmelCase__ : Any = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path ,**({"""num_labels""": num_labels} if num_labels is not None else {}) ,cache_dir=A ,**A ,)
        else:
            UpperCAmelCase__ : PretrainedConfig = config
        # Forward selected dropout/layerdrop hparams into the model config.
        UpperCAmelCase__ : Optional[int] = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
        for p in extra_model_params:
            if getattr(self.hparams ,A ,A ):
                assert hasattr(self.config ,A ), f"model config doesn't have a `{p}` attribute"
                setattr(self.config ,A ,getattr(self.hparams ,A ) )
        if tokenizer is None:
            UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path ,cache_dir=A ,)
        else:
            UpperCAmelCase__ : PreTrainedTokenizer = tokenizer
        UpperCAmelCase__ : Optional[int] = MODEL_MODES[mode]
        if model is None:
            UpperCAmelCase__ : Optional[Any] = self.model_type.from_pretrained(
                self.hparams.model_name_or_path ,from_tf=bool(""".ckpt""" in self.hparams.model_name_or_path ) ,config=self.config ,cache_dir=A ,)
        else:
            UpperCAmelCase__ : List[Any] = model

    def __lowercase ( self : Optional[int] ,*A : int ,**A : Tuple ):
        """Reload the model from a checkpoint (intended: rebind self.model)."""
        UpperCAmelCase__ : Optional[Any] = self.model_type.from_pretrained(*A ,**A )

    def __lowercase ( self : Optional[int] ):
        """Build the LR scheduler dict selected by `hparams.lr_scheduler`."""
        UpperCAmelCase__ : Optional[int] = arg_to_scheduler[self.hparams.lr_scheduler]
        UpperCAmelCase__ : Optional[int] = get_schedule_func(
            self.opt ,num_warmup_steps=self.hparams.warmup_steps ,num_training_steps=self.total_steps() )
        UpperCAmelCase__ : Any = {"""scheduler""": scheduler, """interval""": """step""", """frequency""": 1}
        return scheduler

    def __lowercase ( self : Union[str, Any] ):
        """Prepare optimizer (AdamW or Adafactor) with weight-decay parameter groups."""
        UpperCAmelCase__ : Tuple = self.model
        UpperCAmelCase__ : Any = ["""bias""", """LayerNorm.weight"""]
        UpperCAmelCase__ : str = [
            {
                """params""": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ], # check this named paramters
                """weight_decay""": self.hparams.weight_decay,
            },
            {
                """params""": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
                """weight_decay""": 0.0,
            },
        ]
        if self.hparams.adafactor:
            UpperCAmelCase__ : List[str] = Adafactor(
                A ,lr=self.hparams.learning_rate ,scale_parameter=A ,relative_step=A )
        else:
            UpperCAmelCase__ : Tuple = AdamW(
                A ,lr=self.hparams.learning_rate ,eps=self.hparams.adam_epsilon )
        UpperCAmelCase__ : Tuple = optimizer
        UpperCAmelCase__ : Optional[int] = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def __lowercase ( self : Optional[int] ,A : List[str] ,A : int ):
        """Test step delegates to the validation step."""
        return self.validation_step(A ,A )

    def __lowercase ( self : Any ,A : Tuple ):
        """Test epoch end delegates to validation aggregation."""
        return self.validation_end(A )

    def __lowercase ( self : List[Any] ):
        """Total optimizer steps = dataset_size / effective batch size * epochs."""
        UpperCAmelCase__ : Dict = max(1 ,self.hparams.gpus ) # TODO: consider num_tpu_cores
        UpperCAmelCase__ : Union[str, Any] = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def __lowercase ( self : List[Any] ,A : Optional[int] ):
        """Lightning `setup` hook: record dataset size for `total_steps`."""
        if stage == "test":
            UpperCAmelCase__ : Tuple = len(self.test_dataloader().dataset )
        else:
            UpperCAmelCase__ : Any = self.get_dataloader("""train""" ,self.hparams.train_batch_size ,shuffle=A )
            UpperCAmelCase__ : int = len(self.train_dataloader().dataset )

    def __lowercase ( self : Dict ,A : str ,A : int ,A : bool = False ):
        """Abstract: subclasses must build a DataLoader for the given split."""
        raise NotImplementedError("""You must implement this for your task""" )

    def __lowercase ( self : str ):
        return self.train_loader

    def __lowercase ( self : List[Any] ):
        return self.get_dataloader("""dev""" ,self.hparams.eval_batch_size ,shuffle=A )

    def __lowercase ( self : Optional[Any] ):
        return self.get_dataloader("""test""" ,self.hparams.eval_batch_size ,shuffle=A )

    def __lowercase ( self : Union[str, Any] ,A : Optional[int] ):
        """Path of the cached-features file for a split (mode, model, seq length)."""
        return os.path.join(
            self.hparams.data_dir ,"""cached_{}_{}_{}""".format(
                A ,list(filter(A ,self.hparams.model_name_or_path.split("""/""" ) ) ).pop() ,str(self.hparams.max_seq_length ) ,) ,)

    @pl.utilities.rank_zero_only
    def __lowercase ( self : Tuple ,A : Dict[str, Any] ):
        """Checkpoint hook: save model+tokenizer under output_dir/best_tfmr (rank 0 only)."""
        UpperCAmelCase__ : List[str] = self.output_dir.joinpath("""best_tfmr""" )
        UpperCAmelCase__ : int = self.step_count
        self.model.save_pretrained(A )
        self.tokenizer.save_pretrained(A )

    @staticmethod
    def __lowercase ( A : Any ,A : Optional[int] ):
        """Add model-specific CLI arguments (intended: (parser, root_dir))."""
        parser.add_argument(
            """--model_name_or_path""" ,default=A ,type=A ,required=A ,help="""Path to pretrained model or model identifier from huggingface.co/models""" ,)
        parser.add_argument(
            """--config_name""" ,default="""""" ,type=A ,help="""Pretrained config name or path if not the same as model_name""" )
        parser.add_argument(
            """--tokenizer_name""" ,default=A ,type=A ,help="""Pretrained tokenizer name or path if not the same as model_name""" ,)
        parser.add_argument(
            """--cache_dir""" ,default=str(Path(A ).parent / """test_run""" / """cache""" ) ,type=A ,help="""Where do you want to store the pre-trained models downloaded from huggingface.co""" ,)
        parser.add_argument(
            """--encoder_layerdrop""" ,type=A ,help="""Encoder layer dropout probability (Optional). Goes into model.config""" ,)
        parser.add_argument(
            """--decoder_layerdrop""" ,type=A ,help="""Decoder layer dropout probability (Optional). Goes into model.config""" ,)
        parser.add_argument(
            """--dropout""" ,type=A ,help="""Dropout probability (Optional). Goes into model.config""" ,)
        parser.add_argument(
            """--attention_dropout""" ,type=A ,help="""Attention dropout probability (Optional). Goes into model.config""" ,)
        parser.add_argument("""--learning_rate""" ,default=5e-5 ,type=A ,help="""The initial learning rate for Adam.""" )
        parser.add_argument(
            """--lr_scheduler""" ,default="""linear""" ,choices=A ,metavar=A ,type=A ,help="""Learning rate scheduler""" ,)
        parser.add_argument("""--weight_decay""" ,default=0.0 ,type=A ,help="""Weight decay if we apply some.""" )
        parser.add_argument("""--adam_epsilon""" ,default=1e-8 ,type=A ,help="""Epsilon for Adam optimizer.""" )
        parser.add_argument("""--warmup_steps""" ,default=0 ,type=A ,help="""Linear warmup over warmup_steps.""" )
        parser.add_argument("""--num_workers""" ,default=4 ,type=A ,help="""kwarg passed to DataLoader""" )
        parser.add_argument("""--num_train_epochs""" ,dest="""max_epochs""" ,default=3 ,type=A )
        parser.add_argument("""--train_batch_size""" ,default=32 ,type=A )
        parser.add_argument("""--eval_batch_size""" ,default=32 ,type=A )
        parser.add_argument("""--adafactor""" ,action="""store_true""" )
class __lowercase ( pl.Callback ):
    """Callback that initialises the RAG retriever on the master worker only.

    NOTE(review): this fragment defines three callback classes all named
    ``__lowercase`` (obfuscation artifact) — only the last survives at module
    level.  The method below also declares two parameters named ``A`` (a
    SyntaxError) and reads the undefined names ``trainer``/``pl_module``.
    """

    def __lowercase ( self : List[Any] ,A : Any ,A : Dict ):
        """Intended hook signature: (trainer, pl_module)."""
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class __lowercase ( pl.Callback ):
    """Debug callback: print the name of every RAG parameter with no gradient.

    NOTE(review): the method declares two parameters named ``A`` (SyntaxError)
    and reads the undefined name ``pl_module``; it also prints ``A`` where the
    parameter *name* was evidently intended.
    """

    def __lowercase ( self : Optional[int] ,A : Union[str, Any] ,A : int ):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(A )
class __lowercase ( pl.Callback ):
    """Logging callback: log LRs per batch and dump validation/test metrics.

    NOTE(review): obfuscation bound locals to ``UpperCAmelCase__`` while later
    lines read the intended names (``lr_scheduler``, ``metrics``,
    ``output_test_results_file``); the methods cannot run as written.
    """

    def __lowercase ( self : Tuple ,A : Dict ,A : Optional[int] ):
        """Log the current learning rate of every param group (intended: batch-end hook)."""
        UpperCAmelCase__ : List[str] = trainer.lr_schedulers[0]["""scheduler"""]
        UpperCAmelCase__ : int = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr() )}
        pl_module.logger.log_metrics(A )

    def __lowercase ( self : List[str] ,A : pl.Trainer ,A : pl.LightningModule ):
        """Print validation metrics on rank zero."""
        rank_zero_info("""***** Validation results *****""" )
        UpperCAmelCase__ : List[Any] = trainer.callback_metrics
        # Log results
        for key in sorted(A ):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("""{} = {}\n""".format(A ,str(metrics[key] ) ) )

    def __lowercase ( self : Tuple ,A : pl.Trainer ,A : pl.LightningModule ):
        """Print test metrics on rank zero and write them to test_results.txt."""
        rank_zero_info("""***** Test results *****""" )
        UpperCAmelCase__ : Union[str, Any] = trainer.callback_metrics
        # Log and save results to file
        UpperCAmelCase__ : Any = os.path.join(pl_module.hparams.output_dir ,"""test_results.txt""" )
        with open(A ,"""w""" ) as writer:
            for key in sorted(A ):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("""{} = {}\n""".format(A ,str(metrics[key] ) ) )
                    writer.write("""{} = {}\n""".format(A ,str(metrics[key] ) ) )
def add_generic_args(parser, root_dir) -> None:
    """Add training-environment CLI arguments shared by all lightning examples.

    Fixes the obfuscated original, which declared two parameters with the same
    name (a SyntaxError) and replaced every ``type=``/``Path(__file__)``
    argument with the parameter placeholder.

    Args:
        parser: the ``argparse.ArgumentParser`` to extend (mutated in place).
        root_dir: kept for signature compatibility with callers; the default
            paths below are anchored at this file's location instead.
    """
    parser.add_argument(
        "--output_dir",
        default=str(Path(__file__).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(__file__).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model,
    args,
    early_stopping_callback=None,
    logger=True,  # can pass a concrete pl logger object here instead
    extra_callbacks=None,
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    """Seed, build a ``pl.Trainer`` with the standard callbacks and (optionally) fit.

    Fixes the obfuscated original, which declared several parameters with the
    same name (a SyntaxError), read ``args.fpaa`` instead of the ``--fp16``
    flag, dropped every ``train_params`` dict entry, and mutated a shared
    mutable ``[]`` default when an early-stopping callback was appended.

    Args:
        model: a LightningModule whose ``hparams.output_dir`` is the run dir.
        args: parsed namespace from :func:`add_generic_args` (and friends).
        early_stopping_callback: optional callback appended to the extras.
        logger: passed through to ``pl.Trainer``.
        extra_callbacks: optional list of additional callbacks (copied, never
            mutated in place).
        checkpoint_callback: optional ModelCheckpoint; a val_loss-monitoring
            one is created when ``None``.
        logging_callback: optional LoggingCallback; created when ``None``.
        **extra_train_kwargs: reserved for callers; not forwarded currently.

    Returns:
        The constructed ``pl.Trainer``.
    """
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # Avoid the shared-mutable-default pitfall: normalise to a fresh list.
    extra_callbacks = [] if extra_callbacks is None else list(extra_callbacks)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfuly executed!")
    return trainer
| 194 | 0 |
'''simple docstring'''
from __future__ import annotations
class __a :
def __init__( self : Optional[int] ,lowerCamelCase : list[list[int]] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TypeError(
"""Matrices must be formed from a list of zero or more lists containing at """
"""least one and the same number of values, each of which must be of type """
"""int or float.""" )
if len(lowerCamelCase ) != 0:
__SCREAMING_SNAKE_CASE = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(lowerCamelCase ) != cols:
raise error
for value in row:
if not isinstance(lowerCamelCase ,(int, float) ):
raise error
__SCREAMING_SNAKE_CASE = rows
else:
__SCREAMING_SNAKE_CASE = []
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
return len(self.rows )
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return len(self.rows[0] )
@property
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
return (self.num_rows, self.num_columns)
@property
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
return self.order[0] == self.order[1]
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(lowerCamelCase )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
return bool(self.determinant() )
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : int ,lowerCamelCase : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(lowerCamelCase ).determinant()
def UpperCAmelCase__ ( self : str ,lowerCamelCase : int ,lowerCamelCase : int ):
'''simple docstring'''
if (row + column) % 2 == 0:
return self.get_minor(lowerCamelCase ,lowerCamelCase )
return -1 * self.get_minor(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
return Matrix(
[
[self.get_minor(lowerCamelCase ,lowerCamelCase ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.determinant()
if not determinant:
raise TypeError("""Only matrices with a non-zero determinant have an inverse""" )
return self.adjugate() * (1 / determinant)
def __repr__( self : List[Any] ):
'''simple docstring'''
return str(self.rows )
def __str__( self : List[str] ):
'''simple docstring'''
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
"""[""" + """. """.join([str(lowerCamelCase ) for value in row] ) + """.]"""
for row in self.rows
] )
+ "]"
)
def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : list[int] ,lowerCamelCase : int | None = None ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TypeError("""Row must be a list containing all ints and/or floats""" )
if not isinstance(lowerCamelCase ,lowerCamelCase ):
raise type_error
for value in row:
if not isinstance(lowerCamelCase ,(int, float) ):
raise type_error
if len(lowerCamelCase ) != self.num_columns:
raise ValueError(
"""Row must be equal in length to the other rows in the matrix""" )
if position is None:
self.rows.append(lowerCamelCase )
else:
__SCREAMING_SNAKE_CASE = self.rows[0:position] + [row] + self.rows[position:]
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : list[int] ,lowerCamelCase : int | None = None ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TypeError(
"""Column must be a list containing all ints and/or floats""" )
if not isinstance(lowerCamelCase ,lowerCamelCase ):
raise type_error
for value in column:
if not isinstance(lowerCamelCase ,(int, float) ):
raise type_error
if len(lowerCamelCase ) != self.num_rows:
raise ValueError(
"""Column must be equal in length to the other columns in the matrix""" )
if position is None:
__SCREAMING_SNAKE_CASE = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
__SCREAMING_SNAKE_CASE = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self : int ,lowerCamelCase : object ):
'''simple docstring'''
if not isinstance(lowerCamelCase ,lowerCamelCase ):
return NotImplemented
return self.rows == other.rows
def __ne__( self : Any ,lowerCamelCase : object ):
'''simple docstring'''
return not self == other
def __neg__( self : Any ):
'''simple docstring'''
return self * -1
def __add__( self : List[Any] ,lowerCamelCase : Matrix ):
'''simple docstring'''
if self.order != other.order:
raise ValueError("""Addition requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self : Any ,lowerCamelCase : Matrix ):
'''simple docstring'''
if self.order != other.order:
raise ValueError("""Subtraction requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self : Any ,lowerCamelCase : Matrix | int | float ):
'''simple docstring'''
if isinstance(lowerCamelCase ,(int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(lowerCamelCase ,lowerCamelCase ):
if self.num_columns != other.num_rows:
raise ValueError(
"""The number of columns in the first matrix must """
"""be equal to the number of rows in the second""" )
return Matrix(
[
[Matrix.dot_product(lowerCamelCase ,lowerCamelCase ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
"""A Matrix can only be multiplied by an int, float, or another matrix""" )
def __pow__( self : Optional[int] ,lowerCamelCase : int ):
'''simple docstring'''
if not isinstance(lowerCamelCase ,lowerCamelCase ):
raise TypeError("""A Matrix can only be raised to the power of an int""" )
if not self.is_square:
raise ValueError("""Only square matrices can be raised to a power""" )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"""Only invertable matrices can be raised to a negative power""" )
__SCREAMING_SNAKE_CASE = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def UpperCAmelCase__ ( cls : str ,lowerCamelCase : list[int] ,lowerCamelCase : list[int] ):
'''simple docstring'''
return sum(row[i] * column[i] for i in range(len(lowerCamelCase ) ) )
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 109 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
# fix: main() below logs through `logger`, which was never bound (the logger
# was assigned to `a` and then immediately shadowed by the device string).
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)  # embedding/indexing only — no training
a = "cuda" if torch.cuda.is_available() else "cpu"  # device string used for model/tensor placement
def split_text(text, n=100, character=" "):
    """Split *text* into chunks of *n* words, splitting/joining on *character*.

    fix: the original declared three identically named parameters (a
    SyntaxError); the function is renamed to ``split_text`` because that is
    the name the document splitter below calls.
    """
    words = text.split(character)
    return [character.join(words[i : i + n]).strip() for i in range(0, len(words), n)]
def split_documents(documents) -> dict:
    """Split a {"title": [...], "text": [...]} batch into 100-word passages.

    fix: the original unpacked both accumulators into one obfuscated name,
    leaving ``titles``/``texts`` (appended to below) undefined; renamed so
    main() can pass it to ``dataset.map``.
    """
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents, ctx_encoder, ctx_tokenizer) -> dict:
    """Compute DPR embeddings for a batch of passages.

    fix: the original declared three identically named parameters (a
    SyntaxError); main() below passes ``ctx_encoder``/``ctx_tokenizer`` by
    keyword, so only the first (documents) is positional.
    """
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    # `a` is this module's device string ("cuda" or "cpu") defined at the top.
    embeddings = ctx_encoder(input_ids.to(device=a), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(rag_example_args, processing_args, index_hnsw_args) -> None:
    """Build a RAG knowledge dataset from a tab-separated csv and index it with Faiss.

    fix: the original declared three identically named parameters (a
    SyntaxError); the body reads ``rag_example_args``/``processing_args``/
    ``index_hnsw_args`` and the __main__ block calls ``main``.
    """
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=a)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class RagExampleArguments:
    """CLI arguments: input csv, model names, and output location.

    fix: the class was named ``__a`` while ``HfArgumentParser`` below expects
    ``RagExampleArguments``, all fields shared one repeated (mangled) name,
    and ``_snake_case`` stood in for ``__file__``/``None``. Field names are
    restored from their uses in main().
    """

    # Path to a tab-separated csv with "title" and "text" columns.
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    # Optional question to feed the RAG model.
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    # Where the passages dataset and the Faiss index are written.
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )
@dataclass
class ProcessingArguments:
    """CLI arguments controlling the passage-splitting / embedding passes.

    fix: the class was named ``__a`` while ``HfArgumentParser`` below expects
    ``ProcessingArguments`` and both fields shared one repeated (mangled)
    name; field names are restored from their uses in main().
    """

    # None means single-process dataset.map.
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )
@dataclass
class IndexHnswArguments:
    """CLI arguments for the Faiss HNSW index.

    fix: the class was named ``__a`` while ``HfArgumentParser`` below expects
    ``IndexHnswArguments`` and both fields shared one repeated (mangled)
    name; ``d`` and ``m`` are the names main() reads.
    """

    # Embedding dimension handed to faiss.IndexHNSWFlat.
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    # Number of bi-directional links per new element (HNSW construction).
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    # fix: the original unpacked all three dataclasses into one repeated name
    # and never bound `parser`, leaving every variable used below undefined.
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        # fix: the chosen output dir must be stored back on the args object
        # that main() reads.
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
| 109 | 1 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def UpperCAmelCase(image):
    """Resize a PIL image to a multiple of 32 and convert it to a [-1, 1] NCHW tensor.

    fix: the original collapsed ``w, h`` into a single obfuscated name (so the
    ``w``/``h`` read on the next line were undefined) and referenced the
    nonexistent ``np.floataa`` dtype.
    """
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)  # HWC -> NCHW with a batch dim
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0  # [0, 1] -> [-1, 1]
class __UpperCamelCase(DiffusionPipeline):
    """Latent-diffusion super-resolution pipeline (VQ-VAE + UNet + scheduler).

    fix: the base class was an undefined obfuscated name (DiffusionPipeline is
    what this file imports), ``__call__`` declared several identically named
    parameters (a SyntaxError), many locals (``batch_size``, ``height``,
    ``width``) were read but never bound, and ``extra_kwargs["eta"]`` was
    never actually set.
    """

    def __init__(self, vqvae, unet, scheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image=None,
        batch_size=1,
        num_inference_steps=100,
        eta=0.0,
        generator=None,
        output_type="pil",
        return_dict=True,
    ):
        """Upscale *image* and return an ImagePipelineOutput (or a tuple)."""
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            # NOTE(review): `preprocess` is the module-level helper defined
            # above (under an obfuscated name) — confirm the name resolves.
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 33 |
def UpperCAmelCase(n, array, target):
    """Count ordered combinations of *array* items summing to *target* (naive recursion).

    fix: the original declared three identically named parameters, which is a
    SyntaxError; the intended signature is (n, array, target) — n is unused
    in this variant but kept for signature parity with the DP versions.
    """

    def count_of_possible_combinations(target) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1  # exactly one way: pick nothing more
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
def UpperCAmelCase(n, array, target):
    """Count ordered combinations summing to *target*, memoised over sub-targets.

    fix: the original declared three identically named parameters, which is a
    SyntaxError; the intended signature is (n, array, target).
    """

    def count_of_possible_combinations_with_dp_array(target, dp_array) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:  # -1 marks "not computed yet"
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def UpperCAmelCase(n, array, target):
    """Count ordered combinations summing to *target*, bottom-up in O(target * n).

    fix: the original declared three identically named parameters, which is a
    SyntaxError; here n is the length of *array*.
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # one way to reach 0: pick nothing
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # fix: the three constants were all assigned to one obfuscated name,
    # leaving n/target/array (read by the call below) undefined.
    n = 3
    target = 5
    array = [1, 2, 5]
    # NOTE(review): combination_sum_iv is the counter defined above (it was
    # obfuscated to another name in this file) — confirm the reference resolves.
    print(combination_sum_iv(n, array, target))
| 33 | 1 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__snake_case = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
__snake_case = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
__snake_case = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCAmelCase_(datasets.Metric):
    """BLEU metric wrapping the tensorflow/nmt reference implementation."""

    def _info(self):
        # NOTE(review): datasets.Metric dispatches to _info/_compute; both
        # methods were obfuscated to one colliding name the base class could
        # never find — names restored per the datasets API.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        """Score tokenized *predictions* against tokenized *references*.

        fix: the original declared four identically named parameters (a
        SyntaxError).
        """
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
| 658 |
def check_bouncy(n: int) -> bool:
    """Return True when *n* is "bouncy": digits neither only increase nor only decrease.

    fix: both functions here were obfuscated to the same colliding name ``_A``
    although ``solution`` calls ``check_bouncy`` and __main__ calls
    ``solution``; the type check also compared the argument against itself.
    """
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers
    reaches *percent* (Project Euler 112)."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
    # Run the doctests, then print the Project Euler 112 answer (99% threshold).
    from doctest import testmod
    testmod()
    print(f'''{solution(99)}''')
| 658 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
# Make torch/cuDNN deterministic so pipeline outputs are reproducible across runs.
enable_full_determinism()
class UpperCAmelCase_(unittest.TestCase):
    """Fast CPU tests for the unconditional latent-diffusion (LDM) pipeline.

    fix: the fixture properties are read as ``self.dummy_uncond_unet`` /
    ``self.dummy_vq_model`` but were all defined under one colliding
    obfuscated name; several locals were undefined mangled names; the test
    method is renamed so unittest discovery actually runs it.
    """

    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        # return_dict=False: the original indexes [0], i.e. the tuple form.
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class UpperCAmelCase_(unittest.TestCase):
    """Slow integration test for the pretrained CompVis/ldm-celebahq-256 pipeline.

    NOTE(review): this class shadows the fast test class above (both carry the
    same obfuscated name) — confirm the intended class names.

    fix: undefined mangled locals restored (torch_device, disable=None) and
    the test method renamed so unittest discovery runs it.
    """

    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 708 | """simple docstring"""
from datetime import datetime

import requests
from bs4 import BeautifulSoup  # fix: the package is bs4, not "bsa"

if __name__ == "__main__":
    # Download the og:image of a web page into the current directory.
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    # NOTE(review): the ':' characters make this filename invalid on Windows —
    # confirm whether a portable format is wanted.
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 342 | 0 |
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    """One segment-tree node covering [start, end] with aggregate *val*.

    fix: renamed from an obfuscated name — SegmentTree below instantiates
    ``SegmentTreeNode``; the constructor also declared five identically named
    parameters (a SyntaxError).
    """

    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2  # split point used by update/query
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    """Segment tree over *collection* aggregated with binary *function* (e.g. max, min, add).

    fix: renamed from an obfuscated name — the __main__ block instantiates
    ``SegmentTree``; every method shared one colliding obfuscated name, so the
    names are restored from their call sites (update/query_range/traverse and
    the internal self._build_tree/_update_tree/_query_range calls).
    """

    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Set collection[i] = val and refresh aggregates along the path."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Return the fn-aggregate over the inclusive index range [i, j]."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range entirely in the left child
                return self._query_range(node.left, i, j)
            # range straddles both children
            return self.fn(
                self._query_range(node.left, i, node.mid),
                self._query_range(node.right, node.mid + 1, j),
            )
        # range entirely in the right child
        return self._query_range(node.right, i, j)

    def traverse(self):
        """Yield nodes in breadth-first order."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
    import operator

    # Demo: build a tree per aggregate function and print traversals/queries.
    for fn in [operator.add, max, min]:
        print("*" * 50)
        # fix: the original bound the tree to a different name than the
        # `arr` used everywhere below.
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
| 475 |
from __future__ import annotations
def shear_stress(stress, tangential_force, area) -> tuple[str, float]:
    """Solve shear stress = tangential_force / area for whichever quantity is 0.

    Exactly one argument must be 0 (the unknown); returns the unknown's name
    and its computed value.

    fix: the original declared three identically named parameters (a
    SyntaxError); the body reads ``stress``/``tangential_force``/``area``.
    """
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif stress < 0:
        raise ValueError("Stress cannot be negative")
    elif tangential_force < 0:
        raise ValueError("Tangential Force cannot be negative")
    elif area < 0:
        raise ValueError("Area cannot be negative")
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )


# Backward-compatible alias for the previous (obfuscated) public name.
__UpperCAmelCase = shear_stress
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 475 | 1 |
'''simple docstring'''
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( A : list , A : int , A : int , A : int ) -> list:
"""simple docstring"""
__snake_case : List[Any] = []
__snake_case : Optional[int] = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
__snake_case : Optional[int] = result + left + right
return input_list
def _SCREAMING_SNAKE_CASE ( A : list ) -> list:
"""simple docstring"""
if len(A ) <= 1:
return input_list
__snake_case : Any = list(A )
# iteration for two-way merging
__snake_case : Tuple = 2
while p <= len(A ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(A ) , A ):
__snake_case : Union[str, Any] = i
__snake_case : Tuple = i + p - 1
__snake_case : Dict = (low + high + 1) // 2
__snake_case : int = merge(A , A , A , A )
# final merge of last two parts
if p * 2 >= len(A ):
__snake_case : str = i
__snake_case : Any = merge(A , 0 , A , len(A ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
    # Read comma-separated integers from stdin and print them sorted.
    # fix: the original assigned input/results to an obfuscated name while
    # reading `user_input`/`unsorted` below.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class a_(PretrainedConfig):
    """Configuration for a ViT-MSN model.

    fix: the base class was an undefined obfuscated name (PretrainedConfig is
    what this file imports), ``__init__`` declared thirteen identically named
    parameters (a SyntaxError), and the values were never stored on ``self``.
    Parameter/attribute names are restored from the right-hand sides of the
    original assignments.
    """

    # NOTE(review): attribute name follows the transformers PretrainedConfig
    # convention for registering the model type.
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
"""simple docstring"""
import datasets
from .evaluate import evaluate
# fix: all three constants were assigned to one repeated obfuscated name while
# the metric class below reads _CITATION/_DESCRIPTION/_KWARGS_DESCRIPTION.
_CITATION = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
_DESCRIPTION = '\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
_KWARGS_DESCRIPTION = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _UpperCAmelCase(datasets.Metric):
    """SQuAD v1 metric (exact match and F1) wrapping the official scoring script."""

    def _info(self):
        # NOTE(review): datasets.Metric dispatches to _info/_compute; the
        # obfuscated method names could never be found by the base class —
        # names restored per the datasets API.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
            reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
        )

    def _compute(self, predictions, references):
        """Reshape predictions/references into the official SQuAD format and score them.

        fix: the original declared two identically named parameters (a
        SyntaxError) and called evaluate() with an undefined mangled name.
        """
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 506 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowercase_(SchedulerCommonTest):
    """Tests for `DPMSolverSDEScheduler`.

    Runs configuration sweeps plus full denoising loops whose output sums and
    means are pinned per backend (cpu / cuda / mps).
    """

    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Default scheduler kwargs; any keyword argument overrides the default."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        """Full deterministic loop with the default (epsilon) prediction type."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47_821_044_921_875) < 1e-2
            assert abs(result_mean.item() - 0.2_178_705_964_565_277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59_352_111_816_406) < 1e-2
            assert abs(result_mean.item() - 0.22_342_906_892_299_652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52_383_422_851_562) < 1e-2
            assert abs(result_mean.item() - 0.211_619_570_851_326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        """Full deterministic loop with `prediction_type='v_prediction'`."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''')
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77_149_200_439_453) < 1e-2
            assert abs(result_mean.item() - 0.16_226_289_014_816_284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1_663_360_595_703) < 1e-2
            assert abs(result_mean.item() - 0.16_688_326_001_167_297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8_487_548_828_125) < 1e-2
            assert abs(result_mean.item() - 0.1_560_530_662_536_621) < 1e-3

    def test_full_loop_device(self):
        """Same loop, but the timesteps are placed on the target device up front."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46_957_397_460_938) < 1e-2
            assert abs(result_mean.item() - 0.21_805_934_607_982_635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59_353_637_695_312) < 1e-2
            assert abs(result_mean.item() - 0.22_342_908_382_415_771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52_383_422_851_562) < 1e-2
            assert abs(result_mean.item() - 0.211_619_570_851_326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        """Device loop with the Karras sigma schedule enabled."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66_974_135_742_188) < 1e-2
            assert abs(result_mean.item() - 0.23_003_872_730_981_811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63_653_564_453_125) < 1e-2
            assert abs(result_mean.item() - 0.23_003_872_730_981_811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3_135_223_388_672) < 1e-2
            assert abs(result_mean.item() - 0.23_003_872_730_981_811) < 1e-2
| 443 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-import structure: maps each submodule name to the public names it
# provides.  Optional submodules are added only when their dependencies exist.
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

# Vision-dependent submodules.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

# Torch-dependent modeling submodule.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports each
    # submodule only when one of its attributes is first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 679 | from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __SCREAMING_SNAKE_CASE:
    """Feature type for translations with a fixed set of languages per example.

    `languages` lists the language codes; an encoded example maps each code to
    its translated string.
    """

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        # Arrow storage: one string field per language, in sorted order.
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def lowercase_(self):
        """Flatten into one `Value('string')` feature per language."""
        from .features import Value

        return {k: Value('string') for k in sorted(self.languages)}
@dataclass
class __SCREAMING_SNAKE_CASE:
    """Feature type for translations whose language set may vary per example.

    Encoded examples map a language code to either a single string or a list
    of alternative translations; `encode_example` normalizes them into
    parallel `language` / `translation` tuples sorted by language code.
    """

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        # Deduplicate and sort the declared languages; derive their count.
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )})

    def encode_example(self, translation_dict):
        """Normalize one example; raises ValueError on undeclared languages."""
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) "
                f"are not in valid set ({', '.join(self.languages)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
        return {"language": languages, "translation": translations}

    def flatten(self):
        """Flatten into `Sequence` features for languages and translations."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value('string') ),
            "translation": Sequence(Value('string') ),
        }
| 679 | 1 |
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
# Emit every record (DEBUG and above) so the example's output is visible.
logging.basicConfig(level=logging.DEBUG)

# Root logger; the test case below mirrors it to stdout via a StreamHandler.
UpperCAmelCase__ =logging.getLogger()
def lowerCAmelCase_( ):
    """Return the value of the ``-f`` option parsed from ``sys.argv``.

    pytest invokes the test module with ``-f <path>``; this helper recovers
    that path via a minimal argparse parser.
    """
    option_parser = argparse.ArgumentParser()
    option_parser.add_argument("""-f""")
    parsed_options = option_parser.parse_args()
    return parsed_options.f
class lowerCamelCase__(TestCasePlus):
    """End-to-end tests for the DeeBERT research example (`run_glue_deebert.py`)."""

    def setup(self) -> None:
        # Mirror log output to stdout so the example's progress is visible.
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        """Run ``run_glue_deebert.main()`` with `args`; every reported metric
        must be at least 0.666."""
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            # The example script reads sys.argv, so fake a command line.
            args.insert(0, """run_glue_deebert.py""")
            with patch.object(sys, """argv""", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        """Two-stage training, then two evaluation passes over the result."""
        train_args = """
            --model_type roberta
            --model_name_or_path roberta-base
            --task_name MRPC
            --do_train
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --max_seq_length 128
            --per_gpu_eval_batch_size=1
            --per_gpu_train_batch_size=8
            --learning_rate 2e-4
            --num_train_epochs 3
            --overwrite_output_dir
            --seed 42
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --save_steps 0
            --overwrite_cache
            --eval_after_first_stage
            """.split()
        self.run_and_check(train_args)

        eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --eval_each_highway
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --early_exit_entropy 0.1
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(entropy_eval_args)
| 616 |
"""simple docstring"""
class lowerCamelCase__ :
def __init__( self : Optional[Any] , A_ : Dict , A_ : str , A_ : Any ):
'''simple docstring'''
__lowercase = None
__lowercase = None
__lowercase = graph
self._normalize_graph(A_ , A_ )
__lowercase = len(A_ )
__lowercase = None
def SCREAMING_SNAKE_CASE_ ( self : Dict , A_ : str , A_ : List[Any] ):
'''simple docstring'''
if sources is int:
__lowercase = [sources]
if sinks is int:
__lowercase = [sinks]
if len(A_ ) == 0 or len(A_ ) == 0:
return
__lowercase = sources[0]
__lowercase = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(A_ ) > 1 or len(A_ ) > 1:
__lowercase = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
__lowercase = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
__lowercase = max_input_flow
__lowercase = 0
__lowercase = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
__lowercase = max_input_flow
__lowercase = size - 1
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def SCREAMING_SNAKE_CASE_ ( self : List[str] , A_ : Dict ):
'''simple docstring'''
__lowercase = algorithm(self )
class lowerCamelCase__ :
def __init__( self : Tuple , A_ : Optional[int] ):
'''simple docstring'''
__lowercase = flow_network
__lowercase = flow_network.verticesCount
__lowercase = flow_network.sourceIndex
__lowercase = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
__lowercase = flow_network.graph
__lowercase = False
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
if not self.executed:
self._algorithm()
__lowercase = True
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
pass
class lowerCamelCase__ ( _a ):
def __init__( self : Union[str, Any] , A_ : int ):
'''simple docstring'''
super().__init__(A_ )
# use this to save your result
__lowercase = -1
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class lowerCamelCase__ ( _a ):
def __init__( self : List[str] , A_ : Tuple ):
'''simple docstring'''
super().__init__(A_ )
__lowercase = [[0] * self.verticies_count for i in range(self.verticies_count )]
__lowercase = [0] * self.verticies_count
__lowercase = [0] * self.verticies_count
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
__lowercase = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
__lowercase = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
__lowercase = 0
while i < len(A_ ):
__lowercase = vertices_list[i]
__lowercase = self.heights[vertex_index]
self.process_vertex(A_ )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(A_ ) )
__lowercase = 0
else:
i += 1
__lowercase = sum(self.preflow[self.source_index] )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , A_ : Optional[int] ):
'''simple docstring'''
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(A_ , A_ )
self.relabel(A_ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , A_ : Union[str, Any] , A_ : List[str] ):
'''simple docstring'''
__lowercase = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , A_ : Optional[Any] ):
'''simple docstring'''
__lowercase = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
__lowercase = self.heights[to_index]
if min_height is not None:
__lowercase = min_height + 1
if __name__ == "__main__":
UpperCAmelCase__ =[0]
UpperCAmelCase__ =[3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
UpperCAmelCase__ =[[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
UpperCAmelCase__ =FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
UpperCAmelCase__ =flow_network.find_maximum_flow()
print(f"""maximum flow is {maximum_flow}""")
| 616 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def UpperCAmelCase ( A__: list , A__: list ) -> list:
if len(A__ ) != 2 or len(a[0] ) != 2 or len(A__ ) != 2 or len(b[0] ) != 2:
raise Exception('Matrices are not 2x2' )
__lowerCamelCase : int = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def UpperCAmelCase ( A__: list , A__: list ) -> Tuple:
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(A__ ) )
]
def UpperCAmelCase ( A__: list , A__: list ) -> Optional[Any]:
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(A__ ) )
]
def UpperCAmelCase ( A__: list ) -> tuple[list, list, list, list]:
if len(A__ ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('Odd matrices are not supported!' )
__lowerCamelCase : Optional[Any] = len(A__ )
__lowerCamelCase : str = matrix_length // 2
__lowerCamelCase : Union[str, Any] = [[a[i][j] for j in range(A__ , A__ )] for i in range(A__ )]
__lowerCamelCase : Union[str, Any] = [
[a[i][j] for j in range(A__ , A__ )] for i in range(A__ , A__ )
]
__lowerCamelCase : Optional[int] = [[a[i][j] for j in range(A__ )] for i in range(A__ )]
__lowerCamelCase : str = [[a[i][j] for j in range(A__ )] for i in range(A__ , A__ )]
return top_left, top_right, bot_left, bot_right
def UpperCAmelCase ( A__: list ) -> tuple[int, int]:
return len(A__ ), len(matrix[0] )
def UpperCAmelCase ( A__: list ) -> None:
print('\n'.join(str(A__ ) for line in matrix ) )
def UpperCAmelCase ( A__: list , A__: list ) -> list:
if matrix_dimensions(A__ ) == (2, 2):
return default_matrix_multiplication(A__ , A__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Any = split_matrix(A__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : int = split_matrix(A__ )
__lowerCamelCase : Union[str, Any] = actual_strassen(A__ , matrix_subtraction(A__ , A__ ) )
__lowerCamelCase : str = actual_strassen(matrix_addition(A__ , A__ ) , A__ )
__lowerCamelCase : List[Any] = actual_strassen(matrix_addition(A__ , A__ ) , A__ )
__lowerCamelCase : Any = actual_strassen(A__ , matrix_subtraction(A__ , A__ ) )
__lowerCamelCase : Optional[int] = actual_strassen(matrix_addition(A__ , A__ ) , matrix_addition(A__ , A__ ) )
__lowerCamelCase : Dict = actual_strassen(matrix_subtraction(A__ , A__ ) , matrix_addition(A__ , A__ ) )
__lowerCamelCase : Optional[int] = actual_strassen(matrix_subtraction(A__ , A__ ) , matrix_addition(A__ , A__ ) )
__lowerCamelCase : str = matrix_addition(matrix_subtraction(matrix_addition(A__ , A__ ) , A__ ) , A__ )
__lowerCamelCase : Union[str, Any] = matrix_addition(A__ , A__ )
__lowerCamelCase : List[str] = matrix_addition(A__ , A__ )
__lowerCamelCase : Optional[Any] = matrix_subtraction(matrix_subtraction(matrix_addition(A__ , A__ ) , A__ ) , A__ )
# construct the new matrix from our 4 quadrants
__lowerCamelCase : Optional[Any] = []
for i in range(len(A__ ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(A__ ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def UpperCAmelCase ( A__: list , A__: list ) -> list:
if matrix_dimensions(A__ )[1] != matrix_dimensions(A__ )[0]:
__lowerCamelCase : str = (
'Unable to multiply these matrices, please check the dimensions.\n'
f'''Matrix A: {matrixa}\n'''
f'''Matrix B: {matrixa}'''
)
raise Exception(A__ )
__lowerCamelCase : List[Any] = matrix_dimensions(A__ )
__lowerCamelCase : str = matrix_dimensions(A__ )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
__lowerCamelCase : Optional[Any] = max(*A__ , *A__ )
__lowerCamelCase : Optional[Any] = int(math.pow(2 , math.ceil(math.loga(A__ ) ) ) )
__lowerCamelCase : List[Any] = matrixa
__lowerCamelCase : Optional[Any] = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 , A__ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , A__ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , A__ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
__lowerCamelCase : Optional[Any] = actual_strassen(A__ , A__ )
# Removing the additional zeros
for i in range(0 , A__ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , A__ ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
a_ : List[str] = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
a_ : Tuple = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
| 263 |
"""simple docstring"""
import requests
# OpenWeatherMap credentials and endpoint base URL, read by the helpers below.
APPID = ''  # <-- Put your OpenWeatherMap appid here!
URL_BASE = 'https://api.openweathermap.org/data/2.5/'
def UpperCAmelCase ( A__: str = "Chicago" , A__: str = APPID ) -> dict:
return requests.get(URL_BASE + 'weather' , params=locals() ).json()
def UpperCAmelCase ( A__: str = "Kolkata, India" , A__: str = APPID ) -> dict:
return requests.get(URL_BASE + 'forecast' , params=locals() ).json()
def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """Fetch One Call data for the given coordinates; parameter names become the query string."""
    return requests.get(URL_BASE + 'onecall', params=locals()).json()
if __name__ == "__main__":
from pprint import pprint
while True:
a_ : List[Any] = input('''Enter a location:''').strip()
if location:
pprint(current_weather(location))
else:
break
| 263 | 1 |
"""simple docstring"""
def _lowerCamelCase ( lowerCamelCase__ : str , lowerCamelCase__ : str ):
lowercase__ : int = len(lowerCamelCase__ )
lowercase__ : int = len(lowerCamelCase__ )
lowercase__ : int = (
first_str_length if first_str_length > second_str_length else second_str_length
)
lowercase__ : list = []
for char_count in range(lowerCamelCase__ ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(lowerCamelCase__ )
if __name__ == "__main__":
print(alternative_string_arrange('AB', 'XYZ'), end=' ') | 200 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# SentencePiece's meta-symbol marking a word-initial piece.
SPIECE_UNDERLINE = '▁'

# File names the tokenizer expects inside a checkpoint directory.
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

# Download locations of the pretrained vocabulary files.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
    }
}

# Maximum input lengths supported by the pretrained positional embeddings.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/xglm-564M': 2048,
}
class _SCREAMING_SNAKE_CASE(PreTrainedTokenizer):
    """XGLM tokenizer backed by a SentencePiece BPE model.

    Mirrors fairseq's vocabulary alignment: ids 0-3 are reserved for the
    special tokens <s>, <pad>, </s>, <unk>, so every SentencePiece id is
    shifted by `fairseq_offset`; seven "madeupword" tokens pad the tail.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer: fairseq reserves seven
        # extra "madeupword" tokens at the end of the vocabulary.
        self.num_madeup_words = 7
        madeup_words = [f'<madeupword{i}>' for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq
        # vocab and position 3 in the spm vocab.
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {
            f'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)
        }
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        # The SentencePiece processor is not picklable; serialize its proto.
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """XGLM format: ``</s> A`` for one sequence, ``</s> A </s> </s> B`` for a pair."""
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Mask of 1s on special-token positions, 0s on sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """XGLM does not use token types, so the mask is all zeros."""
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Token -> id, honoring the fairseq special-token table first."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Id -> token, honoring the fairseq special-token table first."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        # '▁' is SentencePiece's word-boundary marker.
        out_string = "".join(tokens).replace('▁', " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy (or re-serialize) the SentencePiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def SCREAMING_SNAKE_CASE ( frequency , samplerate , q_factor = 1 / sqrt(2)):
    """Create a 2nd-order low-pass biquad filter (RBJ Audio EQ Cookbook).

    Bug fix: the original reused one identifier for every parameter
    (``SyntaxError``) and every local, so the coefficient lists referenced
    undefined names.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def SCREAMING_SNAKE_CASE ( frequency , samplerate , q_factor = 1 / sqrt(2)):
    """Create a 2nd-order high-pass biquad filter (RBJ Audio EQ Cookbook).

    Bug fix: parameters/locals were collapsed onto a single reused name.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def SCREAMING_SNAKE_CASE ( frequency , samplerate , q_factor = 1 / sqrt(2)):
    """Create a 2nd-order band-pass biquad filter (RBJ Audio EQ Cookbook).

    Bug fix: parameters/locals were collapsed onto a single reused name.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def SCREAMING_SNAKE_CASE ( frequency , samplerate , q_factor = 1 / sqrt(2)):
    """Create a 2nd-order all-pass biquad filter (RBJ Audio EQ Cookbook).

    Bug fix: parameters/locals were collapsed onto a single reused name.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    # All-pass: numerator is the mirrored denominator.
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def SCREAMING_SNAKE_CASE ( frequency , samplerate , gain_db , q_factor = 1 / sqrt(2) , ):
    """Create a 2nd-order peaking-EQ biquad filter (RBJ Audio EQ Cookbook).

    Bug fix: parameters/locals were collapsed onto a single reused name.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def SCREAMING_SNAKE_CASE ( frequency , samplerate , gain_db , q_factor = 1 / sqrt(2) , ):
    """Create a 2nd-order low-shelf biquad filter (RBJ Audio EQ Cookbook).

    Bug fix: parameters/locals were collapsed onto a single reused name.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def SCREAMING_SNAKE_CASE ( frequency , samplerate , gain_db , q_factor = 1 / sqrt(2) , ):
    """Create a 2nd-order high-shelf biquad filter (RBJ Audio EQ Cookbook).

    Bug fix: parameters/locals were collapsed onto a single reused name.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
| 200 |
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both module constants below share the obfuscated name
# ``__a``, so the logger binding is immediately overwritten by the archive
# map; class code below that calls ``logger.warning`` will NameError —
# confirm intended names (``logger`` / pretrained config archive map).
__a : List[Any] = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL.
__a : Tuple = {
    'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json',
}
class _SCREAMING_SNAKE_CASE ( __snake_case ):
    """Configuration for the ALIGN text tower (a BERT-style encoder).

    Bug fixes: every ``__init__`` parameter shared one identifier (a
    ``SyntaxError``) and the ``model_type`` class attribute had been renamed,
    so ``cls.model_type`` lookups failed.
    """
    model_type = 'align_text_model'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def lowercase ( cls , pretrained_model_name_or_path , **kwargs ):
        """``from_pretrained``-style loader that unwraps nested AlignConfig dicts."""
        cls._set_token_in_kwargs(kwargs )

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get('''model_type''' ) == "align":
            config_dict = config_dict['''text_config''']

        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )

        return cls.from_dict(config_dict , **kwargs )


# Name used elsewhere in this module (AlignConfig builds its sub-config
# through it).
AlignTextConfig = _SCREAMING_SNAKE_CASE
class _SCREAMING_SNAKE_CASE ( __snake_case ):
    """Configuration for the ALIGN vision tower (an EfficientNet-style encoder).

    Bug fixes: every ``__init__`` parameter shared one identifier (a
    ``SyntaxError``) and the ``model_type`` class attribute had been renamed.
    """
    model_type = 'align_vision_model'

    def __init__(
        self,
        num_channels=3,
        image_size=600,
        width_coefficient=2.0,
        depth_coefficient=3.1,
        depth_divisor=8,
        kernel_sizes=[3, 3, 5, 3, 5, 5, 3],
        in_channels=[32, 16, 24, 40, 80, 112, 192],
        out_channels=[16, 24, 40, 80, 112, 192, 320],
        depthwise_padding=[],
        strides=[1, 2, 2, 2, 1, 2, 1],
        num_block_repeats=[1, 2, 2, 3, 3, 4, 1],
        expand_ratios=[1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio=0.25,
        hidden_act="swish",
        hidden_dim=2560,
        pooling_type="mean",
        initializer_range=0.02,
        batch_norm_eps=0.001,
        batch_norm_momentum=0.99,
        drop_connect_rate=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs )

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        # Each block repeat expands to 4 hidden layers.
        self.num_hidden_layers = sum(num_block_repeats ) * 4

    @classmethod
    def lowercase ( cls , pretrained_model_name_or_path , **kwargs ):
        """``from_pretrained``-style loader that unwraps nested AlignConfig dicts."""
        cls._set_token_in_kwargs(kwargs )

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get('''model_type''' ) == "align":
            config_dict = config_dict['''vision_config''']

        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )

        return cls.from_dict(config_dict , **kwargs )


# Name used elsewhere in this module (AlignConfig builds its sub-config
# through it).
AlignVisionConfig = _SCREAMING_SNAKE_CASE
class _SCREAMING_SNAKE_CASE ( __snake_case ):
    """Composite ALIGN configuration holding text + vision sub-configs.

    Bug fixes: every ``__init__`` parameter shared one identifier (a
    ``SyntaxError``), ``model_type`` was renamed, and both methods collided
    on the name ``lowercase``.
    """
    model_type = 'align'
    # NOTE(review): the second boolean class attribute was also renamed by
    # obfuscation; ``is_composition`` is the conventional flag for composite
    # configs — confirm against upstream.
    is_composition = True

    def __init__( self , text_config=None , vision_config=None , projection_dim=640 , temperature_init_value=1.0 , initializer_range=0.02 , **kwargs , ):
        super().__init__(**kwargs )

        if text_config is None:
            text_config = {}
            logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''' )

        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''' )

        # NOTE(review): requires the sibling classes above to be bound to
        # these module-level names.
        self.text_config = AlignTextConfig(**text_config )
        self.vision_config = AlignVisionConfig(**vision_config )

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs ( cls , text_config , vision_config , **kwargs ):
        """Build an AlignConfig from already-constructed sub-configs."""
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )

    def to_dict ( self ):
        """Serialize to a plain dict with the sub-configs expanded."""
        output = copy.deepcopy(self.__dict__ )
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output

    # Backward-compatible binding: the obfuscated original left its last
    # method definition bound to ``lowercase``.
    lowercase = to_dict
| 200 | 1 |
import os
def lowerCamelCase_ ( ):
    """Project Euler 22: sum of (position * alphabetical score) over the
    sorted names in ``p022_names.txt`` next to this file.

    Bug fix: every local was collapsed onto one reused name, leaving
    ``names``/``name_score``/``total_score`` undefined at runtime.
    """
    with open(os.path.dirname(__file__) + '''/p022_names.txt''' ) as file:
        names = str(file.readlines()[0] )
        names = names.replace('''"''', '''''' ).split(''',''' )

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64  # 'A' -> 1 ... 'Z' -> 26

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score


# Name used by the __main__ block below.
solution = lowerCamelCase_

if __name__ == "__main__":
    print(solution())
| 240 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
lowercase = {"""tokenization_herbert""": ["""HerbertTokenizer"""]}
# Canonical name expected by _LazyModule below (the obfuscated original
# referenced it without ever binding it -> NameError at import time).
_import_structure = lowercase

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fast tokenizer is only importable when `tokenizers` is installed.
    # Bug fix: the original rebound the whole structure to a bare list here.
    _import_structure["tokenization_herbert_fast"] = ["""HerbertTokenizerFast"""]

if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 240 | 1 |
'''simple docstring'''
_A = 9.80665
# Conventional name used by the default argument below (the original
# default referenced an undefined ``g``; the module annotation also
# referenced an unimported ``Union``).
g = _A


def UpperCamelCase_ ( fluid_density: float , volume: float , gravity: float = g ) -> float:
    """Archimedes' principle: buoyant force = fluid_density * gravity * volume.

    Raises ValueError for non-positive density/gravity or negative volume.
    Bug fix: the original reused one identifier for all three parameters
    (duplicate argument names -> ``SyntaxError``).
    """
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density" )
    if volume < 0:
        raise ValueError("Impossible Object volume" )
    if gravity <= 0:
        raise ValueError("Impossible Gravity" )

    return fluid_density * gravity * volume


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
| 330 | '''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class _lowercase :
'''simple docstring'''
def __init__( self : Tuple ) -> Any:
__lowerCAmelCase = {}
def a ( self : Dict , SCREAMING_SNAKE_CASE__ : str ) -> None:
__lowerCAmelCase = {}
def a ( self : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : float ) -> None:
if nodea not in self.connections:
self.add_node(SCREAMING_SNAKE_CASE__ )
if nodea not in self.connections:
self.add_node(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = probability
def a ( self : Union[str, Any] ) -> list[str]:
return list(self.connections )
def a ( self : str , SCREAMING_SNAKE_CASE__ : str ) -> str:
__lowerCAmelCase = 0
__lowerCAmelCase = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def UpperCamelCase_ ( start: str , transitions: list[tuple[str, str, float]] , steps: int ) -> dict[str, int]:
    """Run a random walk of ``steps`` transitions from ``start`` and count
    how often each node is visited.

    Bug fixes: the original referenced a class name that does not exist in
    this module (the graph class is bound to ``_lowercase`` above), reused
    one identifier for all three parameters (``SyntaxError``), and collapsed
    its locals onto one name.
    """
    graph = _lowercase()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1 , node2 , probability )

    visited = Counter(graph.get_nodes() )
    node = start
    for _ in range(steps ):
        node = graph.transition(node )
        visited[node] += 1

    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 330 | 1 |
"""simple docstring"""
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Swap array[index1]/array[index2] when they violate ``direction``
    (1 = ascending, 0 = descending)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge the bitonic sequence of ``length`` starting at ``low``."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort ``length`` elements (a power of two) of ``array`` in place.

    Bug fix: all three functions were defined under one reused name, so the
    internal calls to ``comp_and_swap``/``bitonic_merge``/``bitonic_sort``
    (and the ``__main__`` block below) hit undefined names.
    """
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


# Backward-compatible binding: the last definition under the reused
# obfuscated name was the sort entry point.
_snake_case = bitonic_sort
if __name__ == "__main__":
    # Bug fix: the original bound both inputs to one reused name and then
    # referenced the undefined ``user_input``/``unsorted``.
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item.strip()) for item in user_input.split(""",""")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("""\nSorted array in ascending order is: """, end="""""")
    print(*unsorted, sep=""", """)

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("""Sorted array in descending order is: """, end="""""")
    print(*unsorted, sep=""", """)
| 88 |
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
# Desktop-browser User-Agent, sent with the Google search request (and
# mirrored for the per-image downloads in the function below).
UpperCamelCase__ = {
    '''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
    ''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'''
}
def UpperCAmelCase ( query: str = "dhaka" , max_images: int = 5 ):
    """Scrape Google Images for ``query`` and download up to ``max_images``
    full-resolution results into ``query_<query>/``.

    Returns 0 when no image data is found, otherwise the last image index
    reached. Bug fix: the original reused one identifier for both parameters
    (``SyntaxError``) and mixed undefined names throughout the body.
    """
    max_images = min(max_images , 50 )  # Prevent abuse!
    params = {
        '''q''': query,
        '''tbm''': '''isch''',
        '''hl''': '''en''',
        '''ijn''': '''0''',
    }

    html = requests.get('''https://www.google.com/search''' , params=params , headers=UpperCamelCase__ )
    soup = BeautifulSoup(html.text , '''html.parser''' )
    matched_images_data = ''''''.join(
        re.findall(R'''AF_initDataCallback\(([^<]+)\);''' , str(soup.select('''script''' ) ) ) )

    # Round-trip through JSON to normalise escaping before the grid search.
    matched_images_data_fix = json.dumps(matched_images_data )
    matched_images_data_json = json.loads(matched_images_data_fix )

    matched_google_image_data = re.findall(
        R'''\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",''' , matched_images_data_json , )
    if not matched_google_image_data:
        return 0

    # Drop the low-resolution thumbnail entries.
    removed_matched_google_images_thumbnails = re.sub(
        R'''\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]''' , '''''' , str(matched_google_image_data ) , )

    matched_google_full_resolution_images = re.findall(
        R'''(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]''' , removed_matched_google_images_thumbnails , )

    index = 0
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images ):
        if index >= max_images:
            return index
        # Two unicode-escape passes undo the double escaping in the payload.
        original_size_img_not_fixed = bytes(fixed_full_res_image , '''ascii''' ).decode(
            '''unicode-escape''' )
        original_size_img = bytes(original_size_img_not_fixed , '''ascii''' ).decode(
            '''unicode-escape''' )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                '''User-Agent''',
                '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
                ''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''',
            )
        ]
        urllib.request.install_opener(opener )
        path_name = F'query_{query.replace(" " , "_" )}'
        if not os.path.exists(path_name ):
            os.makedirs(path_name )
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img , F'{path_name}/original_size_img_{index}.jpg' )
    return index
if __name__ == "__main__":
    try:
        # Bug fix: the original called an undefined name and printed an
        # undefined variable; the downloader above is bound to
        # ``UpperCAmelCase``.
        image_count = UpperCAmelCase(sys.argv[1])
        print(F"{image_count} images were downloaded to disk.")
    except IndexError:
        print('''Please provide a search term.''')
        raise
| 227 | 0 |
from __future__ import annotations
class _UpperCamelCase :
def __init__( self: int , _SCREAMING_SNAKE_CASE: int ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = order
# a_{0} ... a_{k}
UpperCamelCase_ = [1.0] + [0.0] * order
# b_{0} ... b_{k}
UpperCamelCase_ = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
UpperCamelCase_ = [0.0] * self.order
# y[n-1] ... y[n-k]
UpperCamelCase_ = [0.0] * self.order
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: list[float] , _SCREAMING_SNAKE_CASE: list[float] ) -> Any:
"""simple docstring"""
if len(__A ) < self.order:
UpperCamelCase_ = [1.0, *a_coeffs]
if len(__A ) != self.order + 1:
UpperCamelCase_ = (
f'''Expected a_coeffs to have {self.order + 1} elements '''
f'''for {self.order}-order filter, got {len(__A )}'''
)
raise ValueError(__A )
if len(__A ) != self.order + 1:
UpperCamelCase_ = (
f'''Expected b_coeffs to have {self.order + 1} elements '''
f'''for {self.order}-order filter, got {len(__A )}'''
)
raise ValueError(__A )
UpperCamelCase_ = a_coeffs
UpperCamelCase_ = b_coeffs
def lowercase ( self: int , _SCREAMING_SNAKE_CASE: float ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
UpperCamelCase_ = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
UpperCamelCase_ = self.input_history[:-1]
UpperCamelCase_ = self.output_history[:-1]
UpperCamelCase_ = sample
UpperCamelCase_ = result
return result
| 703 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class _UpperCamelCase ( lowerCAmelCase_ ):
    """Configuration for the original OpenAI GPT model.

    Bug fixes: every ``__init__`` parameter shared one identifier (a
    ``SyntaxError``) and the ``model_type``/``attribute_map`` class
    attributes had been renamed, breaking base-class lookups.
    """
    model_type = '''openai-gpt'''
    attribute_map = {
        '''max_position_embeddings''': '''n_positions''',
        '''hidden_size''': '''n_embd''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
| 371 | 0 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
# NOTE(review): both module constants below were collapsed onto the same
# obfuscated name, so this namedtuple binding is immediately overwritten by
# the URL string and the ``_Datasets(...)`` calls further down will fail —
# confirm the intended names (``_Datasets`` / ``DEFAULT_SOURCE_URL``).
__lowerCamelCase : List[str] = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
__lowerCamelCase : Any = """https://storage.googleapis.com/cvdf-datasets/mnist/"""
def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple ):
snake_case__ : Optional[Any] = numpy.dtype(numpy.uintaa ).newbyteorder(">" )
return numpy.frombuffer(bytestream.read(4 ) , dtype=snake_case_ )[0]
@deprecated(None , "Please use tf.data to implement this functionality." )
def SCREAMING_SNAKE_CASE ( f ):
    """Extract MNIST images from a gzipped IDX file into a 4-D uint8 array
    of shape ``[num_images, rows, cols, 1]``.

    Bug fixes: the ``@deprecated`` decorator was passed an undefined name
    (``None`` is the "deprecated since" placeholder) and the dtype was the
    nonexistent ``numpy.uinta`` (intended: ``numpy.uint8``).
    """
    print("Extracting" , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _readaa(bytestream )
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name) )
        num_images = _readaa(bytestream )
        rows = _readaa(bytestream )
        cols = _readaa(bytestream )
        buf = bytestream.read(rows * cols * num_images )
        data = numpy.frombuffer(buf , dtype=numpy.uint8 )
        data = data.reshape(num_images , rows , cols , 1 )
        return data


# Name read_data_sets below calls this extractor by.
_extract_images = SCREAMING_SNAKE_CASE
@deprecated(None , "Please use tf.one_hot on tensors." )
def SCREAMING_SNAKE_CASE ( labels_dense , num_classes ):
    """Convert class-index labels into a one-hot matrix.

    Bug fixes: the decorator argument was an undefined name, and the line
    that sets the hot entries had been reduced to a dead local assignment,
    so every row came back all-zero.
    """
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels ) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes) )
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


# Name the label extractor below calls this converter by.
_dense_to_one_hot = SCREAMING_SNAKE_CASE
@deprecated(None , "Please use tf.data to implement this functionality." )
def SCREAMING_SNAKE_CASE ( f , one_hot=False , num_classes=10 ):
    """Extract MNIST labels from a gzipped IDX file into a 1-D uint8 array
    (or a one-hot matrix when ``one_hot`` is set).

    Bug fixes: the decorator argument was an undefined name and the dtype
    was the nonexistent ``numpy.uinta``.
    """
    print("Extracting" , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _readaa(bytestream )
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name) )
        num_items = _readaa(bytestream )
        buf = bytestream.read(num_items )
        labels = numpy.frombuffer(buf , dtype=numpy.uint8 )
        if one_hot:
            return _dense_to_one_hot(labels , num_classes )
        return labels


# Name read_data_sets below calls this extractor by.
_extract_labels = SCREAMING_SNAKE_CASE
class SCREAMING_SNAKE_CASE__ :
    """Container for MNIST images/labels with shuffled mini-batch iteration.

    Bug fixes: the ``@deprecated`` decorator argument was an undefined name,
    ``__init__`` reused one identifier for every parameter (``SyntaxError``),
    all properties/methods collided on the name ``_lowercase``, and the
    dtype constants used nonexistent ``floataa``/``uinta`` attributes.
    """

    @deprecated(
        None , "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models." , )
    def __init__( self , images , labels , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , seed=None , ):
        seed1, seed2 = random_seed.get_seed(seed )
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2 )
        dtype = dtypes.as_dtype(dtype ).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype )
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0] , images.shape[1] * images.shape[2] )
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32 )
                images = numpy.multiply(images , 1.0 / 255.0 )
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images( self ):
        return self._images

    @property
    def labels( self ):
        return self._labels

    @property
    def num_examples( self ):
        return self._num_examples

    @property
    def epochs_completed( self ):
        return self._epochs_completed

    def next_batch( self , batch_size , fake_data=False , shuffle=True ):
        """Return the next ``batch_size`` examples, reshuffling per epoch."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size )],
                [fake_label for _ in range(batch_size )],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples )
            numpy.random.shuffle(perm0 )
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples )
                numpy.random.shuffle(perm )
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
                numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]


# Name read_data_sets below constructs datasets through.
_DataSet = SCREAMING_SNAKE_CASE__
@deprecated(None , "Please write your own downloading logic." )
def SCREAMING_SNAKE_CASE ( filename , work_directory , source_url ):
    """Download ``filename`` from ``source_url`` into ``work_directory``
    unless it is already present; return the local file path.

    Bug fixes: the decorator argument was an undefined name and the locals
    had been collapsed onto one reused identifier.
    """
    if not gfile.Exists(work_directory ):
        gfile.MakeDirs(work_directory )
    filepath = os.path.join(work_directory , filename )
    if not gfile.Exists(filepath ):
        urllib.request.urlretrieve(source_url , filepath )  # noqa: S310
        with gfile.GFile(filepath ) as f:
            size = f.size()
        print("Successfully downloaded" , filename , size , "bytes." )
    return filepath


# Name read_data_sets below calls the downloader by.
_maybe_download = SCREAMING_SNAKE_CASE
# Local fallbacks: the obfuscation collapsed the module-level namedtuple and
# URL constant onto one name, so both are rebound here to keep this function
# self-consistent.
_Datasets = collections.namedtuple("_Datasets" , ["train", "validation", "test"] )
_DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
# The dataset container class above is bound to this obfuscated name.
_DataSet = SCREAMING_SNAKE_CASE__


@deprecated(
    None , "Please use alternatives such as:" " tensorflow_datasets.load(\'mnist\')" )
def SCREAMING_SNAKE_CASE ( train_dir , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , validation_size=5000 , seed=None , source_url=_DEFAULT_SOURCE_URL , ):
    """Download (if needed) and load the MNIST train/validation/test splits.

    Bug fixes: the decorator argument and the ``DEFAULT_SOURCE_URL`` /
    ``dtypes.floataa`` defaults referenced undefined names, which failed at
    definition time.
    """
    if fake_data:

        def fake():
            return _DataSet(
                [] , [] , fake_data=True , one_hot=one_hot , dtype=dtype , seed=seed )

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train , validation=validation , test=test )

    if not source_url:  # empty string check
        source_url = _DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(
        train_images_file , train_dir , source_url + train_images_file )
    with gfile.Open(local_file , "rb" ) as f:
        train_images = _extract_images(f )

    local_file = _maybe_download(
        train_labels_file , train_dir , source_url + train_labels_file )
    with gfile.Open(local_file , "rb" ) as f:
        train_labels = _extract_labels(f , one_hot=one_hot )

    local_file = _maybe_download(
        test_images_file , train_dir , source_url + test_images_file )
    with gfile.Open(local_file , "rb" ) as f:
        test_images = _extract_images(f )

    local_file = _maybe_download(
        test_labels_file , train_dir , source_url + test_labels_file )
    with gfile.Open(local_file , "rb" ) as f:
        test_labels = _extract_labels(f , one_hot=one_hot )

    if not 0 <= validation_size <= len(train_images ):
        msg = (
            "Validation size should be between 0 and "
            f'''{len(train_images )}. Received: {validation_size}.'''
        )
        raise ValueError(msg )

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images , train_labels , **options )
    validation = _DataSet(validation_images , validation_labels , **options )
    test = _DataSet(test_images , test_labels , **options )

    return _Datasets(train=train , validation=validation , test=test )
| 297 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int , den: int ) -> bool:
    """True when num/den is a non-trivial "digit cancelling" fraction
    (Project Euler 33): crossing out the shared digit preserves the value.
    Caller guarantees ``den % 10 != 0``."""
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int ) -> list[str]:
    """Enumerate the two-digit digit-cancelling fractions as "num/den" strings."""
    solutions = []
    den = 11
    last_digit = int('1' + '0' * digit_len )
    for num in range(den , last_digit ):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num , den ):
                    solutions.append(f'{num}/{den}' )
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2 ) -> int:
    """Product of the cancelling fractions, returned as the denominator of
    the product in lowest terms (Project Euler 33 asks for this value).

    Bug fix: all three functions were defined under one reused name, so the
    internal calls (and the ``__main__`` block) hit undefined names.
    """
    result = 1.0
    for fraction in fraction_list(n ):
        frac = Fraction(fraction )
        result *= frac.denominator / frac.numerator
    return int(result )


# Backward-compatible binding: the last definition under the reused
# obfuscated name was the solution entry point.
SCREAMING_SNAKE_CASE_ = solution

if __name__ == "__main__":
    print(solution())
| 650 | 0 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class a ( unittest.TestCase ):
    """Unit tests for the greedy knapsack helper ``kp.calc_profit``.

    NOTE(review): obfuscation broke this suite in several ways — every test
    method is bound to the same name ``A_`` (only the last survives), the
    fixtures are all assigned to one reused local, and the
    ``assertRaisesRegex`` calls reference an undefined ``lowercase_`` where
    the exception type and callable were expected. Confirm the intended
    method names and arguments before relying on these tests.
    """

    def A_ ( self : Optional[int] ):
        # Intended fixture: profits, weights, and a max-weight budget.
        snake_case_ = [10, 20, 30, 40, 50, 60]
        snake_case_ = [2, 4, 6, 8, 10, 12]
        snake_case_ = 100
        self.assertEqual(kp.calc_profit(lowercase_ , lowercase_ , lowercase_ ) , 210 )

    def A_ ( self : str ):
        # Expected: ValueError when the weight budget is not positive.
        self.assertRaisesRegex(lowercase_ , '''max_weight must greater than zero.''' )

    def A_ ( self : int ):
        self.assertRaisesRegex(lowercase_ , '''Weight can not be negative.''' )

    def A_ ( self : List[str] ):
        self.assertRaisesRegex(lowercase_ , '''Profit can not be negative.''' )

    def A_ ( self : Dict ):
        self.assertRaisesRegex(lowercase_ , '''max_weight must greater than zero.''' )

    def A_ ( self : Union[str, Any] ):
        self.assertRaisesRegex(
            lowercase_ , '''The length of profit and weight must be same.''' )


if __name__ == "__main__":
    unittest.main()
| 593 |
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase = 1000 ) -> int:
'''simple docstring'''
snake_case_ ,snake_case_ = 1, 1
snake_case_ = 2
while True:
snake_case_ = 0
snake_case_ = fa + fa
snake_case_ ,snake_case_ = fa, f
index += 1
for _ in str(__UpperCAmelCase ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 593 | 1 |
"""simple docstring"""
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,unittest.TestCase ):
    """Test suite for ``WavaVecaPhonemeCTCTokenizer``.

    NOTE(review): this block looks machine-mangled — the mixin base
    ``__SCREAMING_SNAKE_CASE`` is undefined in this chunk (upstream it is
    ``TokenizerTesterMixin``), every test method is named
    ``__UpperCAmelCase`` (so earlier definitions are shadowed), several
    ``def`` lines repeat the parameter name ``__lowerCamelCase`` (a
    SyntaxError as written), and distinct locals are collapsed into repeated
    ``_snake_case`` bindings whose original names are still referenced in
    the bodies. Code is kept byte-identical; only comments/docstrings were
    added.
    """

    A__ : Dict = WavaVecaPhonemeCTCTokenizer  # tokenizer class under test
    A__ : List[str] = False  # NOTE(review): both class attrs share one mangled name

    def __UpperCAmelCase ( self : Optional[int] ):
        """Write a phoneme vocabulary and special-token map into a temp dir."""
        super().setUp()
        _snake_case = (
            '''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
            '''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
            '''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
            '''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
            '''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
            '''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
            '''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
            '''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
            '''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
            '''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
            '''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
            '''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
            '''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
        ).split(''' ''' )
        # NOTE(review): `__lowerCamelCase` is undefined in this method —
        # upstream this zips the token list built above; confirm before running.
        _snake_case = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
        _snake_case = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''}
        _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(__lowerCamelCase ) + '''\n''' )

    def __UpperCAmelCase ( self : str , __lowerCamelCase : Any , __lowerCamelCase : Dict=False , __lowerCamelCase : List[Any]=2_0 , __lowerCamelCase : int=5 ):
        """Build a (text, ids) pair whose encode/decode round-trip is stable.

        NOTE(review): `tokenizer`, `max_length`, `min_length`, `toks`,
        `toks_ids`, `output_txt` and `with_prefix_space` are referenced but
        were mangled out of the parameter list / assignments.
        """
        _snake_case = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=__lowerCamelCase )) for i in range(len(__lowerCamelCase ) )]
        _snake_case = list(filter(lambda __lowerCamelCase : [t[0]] == tokenizer.encode(t[1] , do_phonemize=__lowerCamelCase ) , __lowerCamelCase ) )
        if max_length is not None and len(__lowerCamelCase ) > max_length:
            _snake_case = toks[:max_length]
        if min_length is not None and len(__lowerCamelCase ) < min_length and len(__lowerCamelCase ) > 0:
            while len(__lowerCamelCase ) < min_length:
                _snake_case = toks + toks
        # toks_str = [t[1] for t in toks]
        _snake_case = [t[0] for t in toks]
        # Ensure consistency
        _snake_case = tokenizer.decode(__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase )
        if " " not in output_txt and len(__lowerCamelCase ) > 1:
            _snake_case = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__lowerCamelCase )
                + ''' '''
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__lowerCamelCase )
            )
        if with_prefix_space:
            _snake_case = ''' ''' + output_txt
        _snake_case = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
        return output_txt, output_ids

    def __UpperCAmelCase ( self : List[Any] , **__lowerCamelCase : Union[str, Any] ):
        """Instantiate the tokenizer under test from the temp vocab dir."""
        kwargs.update(self.special_tokens_map )
        return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )

    def __UpperCAmelCase ( self : List[Any] ):
        """Newly added tokens get appended ids and are used when encoding."""
        _snake_case = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
        # check adding a single token
        tokenizer.add_tokens('''xxx''' )
        _snake_case = tokenizer('''m xxx ɪ''' , do_phonemize=__lowerCamelCase ).input_ids
        self.assertEqual(__lowerCamelCase , [1_3, 3_9_2, 1_7] ) # xxx should be last token
        tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] )
        _snake_case = tokenizer('''m aaa ɪ ccc''' , do_phonemize=__lowerCamelCase ).input_ids
        self.assertEqual(__lowerCamelCase , [1_3, 3_9_3, 1_7, 3_9_5] ) # aaa and ccc should be after xxx and 2 after aaa
        _snake_case = tokenizer('''maɪ c''' , do_phonemize=__lowerCamelCase ).input_ids
        self.assertEqual(__lowerCamelCase , [3, 2_0_0] ) # mai should be <unk> (=3)

    def __UpperCAmelCase ( self : Any ):
        """phonemize turns English text into an espeak phoneme string."""
        _snake_case = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
        _snake_case = '''Hello how are you'''
        _snake_case = tokenizer.phonemize(__lowerCamelCase , phonemizer_lang='''en-us''' )
        self.assertEqual(__lowerCamelCase , '''h ə l oʊ h aʊ ɑːɹ j uː''' )

    def __UpperCAmelCase ( self : Optional[Any] ):
        """Encoding raw text equals encoding its pre-phonemized form."""
        _snake_case = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
        _snake_case = '''Hello how are you'''
        _snake_case = tokenizer.phonemize(__lowerCamelCase , phonemizer_lang='''en-us''' )
        self.assertEqual(tokenizer(__lowerCamelCase ).input_ids , tokenizer(__lowerCamelCase , do_phonemize=__lowerCamelCase ).input_ids )

    def __UpperCAmelCase ( self : List[Any] ):
        """decode(encode(text)) round-trips to the phonemized text."""
        _snake_case = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
        _snake_case = '''Hello how are you'''
        _snake_case = tokenizer.phonemize(__lowerCamelCase , phonemizer_lang='''en-us''' )
        _snake_case = tokenizer.decode(tokenizer(__lowerCamelCase ).input_ids )
        self.assertEqual(__lowerCamelCase , __lowerCamelCase )

    def __UpperCAmelCase ( self : Union[str, Any] ):
        """batch_decode agrees with per-sample decode (padding stripped)."""
        _snake_case = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
        _snake_case = [
            [1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 8, 9_8],
            [2_4, 2_2, 5, 2_4, 2_2, 5, 7_7],
        ]
        _snake_case = tokenizer.decode(sample_ids[0] )
        _snake_case = tokenizer.batch_decode(__lowerCamelCase )
        self.assertEqual(__lowerCamelCase , batch_tokens[0] )
        self.assertEqual(__lowerCamelCase , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )

    def __UpperCAmelCase ( self : Optional[Any] ):
        """phonemize inserts the word delimiter token between words."""
        _snake_case = self.tokenizer_class.from_pretrained(
            '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
        tokenizer.add_tokens('''|''' )
        _snake_case = '''Hello how are you'''
        _snake_case = tokenizer.phonemize(__lowerCamelCase , phonemizer_lang='''en-us''' )
        self.assertEqual(__lowerCamelCase , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' )

    def __UpperCAmelCase ( self : Any ):
        """Encoding with a word delimiter matches encoding phonemized text."""
        _snake_case = self.tokenizer_class.from_pretrained(
            '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
        tokenizer.add_tokens('''|''' )
        _snake_case = '''Hello how are you'''
        _snake_case = tokenizer.phonemize(__lowerCamelCase , phonemizer_lang='''en-us''' )
        self.assertEqual(tokenizer(__lowerCamelCase ).input_ids , tokenizer(__lowerCamelCase , do_phonemize=__lowerCamelCase ).input_ids )

    def __UpperCAmelCase ( self : List[Any] ):
        """decode drops the word delimiter unless filtering is disabled."""
        _snake_case = self.tokenizer_class.from_pretrained(
            '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
        tokenizer.add_tokens('''|''' )
        # fmt: off
        _snake_case = [
            [1_1, 5, 1_5, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 1_5, 8, tokenizer.word_delimiter_token_id, 9_8],
            [tokenizer.word_delimiter_token_id, 2_4, 2_2, tokenizer.word_delimiter_token_id, 5, 2_4, 2_2, 5, 7_7],
        ]
        # fmt: on
        # decode with word_del_token filter
        _snake_case = tokenizer.decode(sample_ids[0] )
        _snake_case = tokenizer.batch_decode(__lowerCamelCase )
        self.assertEqual(__lowerCamelCase , batch_tokens[0] )
        self.assertEqual(__lowerCamelCase , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
        # decode with no word_del_token filter
        _snake_case = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=__lowerCamelCase )
        _snake_case = tokenizer.batch_decode(__lowerCamelCase , filter_word_delimiter_token=__lowerCamelCase )
        self.assertEqual(__lowerCamelCase , batch_tokens[0] )
        self.assertEqual(__lowerCamelCase , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] )

    def __UpperCAmelCase ( self : Dict ):
        """Round-trip with delimiter filtering reproduces the phoneme string."""
        _snake_case = self.tokenizer_class.from_pretrained(
            '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
        tokenizer.add_tokens('''|''' )
        _snake_case = '''Hello how are you'''
        _snake_case = tokenizer.phonemize(__lowerCamelCase , phonemizer_lang='''en-us''' )
        _snake_case = tokenizer.decode(tokenizer(__lowerCamelCase ).input_ids , filter_word_delimiter_token=__lowerCamelCase )
        self.assertEqual(__lowerCamelCase , __lowerCamelCase )

    def __UpperCAmelCase ( self : List[Any] ):
        """Filtered round-trip equals the phoneme string with delimiters stripped."""
        _snake_case = self.tokenizer_class.from_pretrained(
            '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
        tokenizer.add_tokens('''|''' )
        _snake_case = '''Hello how are you'''
        _snake_case = tokenizer.phonemize(__lowerCamelCase , phonemizer_lang='''en-us''' )
        _snake_case = tokenizer.decode(tokenizer(__lowerCamelCase ).input_ids , filter_word_delimiter_token=__lowerCamelCase )
        self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , __lowerCamelCase )

    def __UpperCAmelCase ( self : Optional[Any] ):
        """Changing phonemizer_lang yields different ids and decoded phonemes."""
        _snake_case = self.tokenizer_class.from_pretrained(
            '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=__lowerCamelCase )
        _snake_case = '''Hello how are you'''
        _snake_case = tokenizer(__lowerCamelCase , phonemizer_lang='''en-us''' ).input_ids
        _snake_case = tokenizer(__lowerCamelCase , phonemizer_lang='''fr-fr''' ).input_ids
        self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
        _snake_case = tokenizer.decode(__lowerCamelCase )
        _snake_case = tokenizer.decode(__lowerCamelCase )
        self.assertEqual(__lowerCamelCase , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
        self.assertEqual(__lowerCamelCase , '''ɛ l o h aʊ a ʁ j u''' )

    def __UpperCAmelCase ( self : int ):
        """Tokenization is case-insensitive (input is lower-cased first)."""
        _snake_case = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
        _snake_case = '''Hello how Are you'''
        _snake_case = '''hello how are you'''
        _snake_case = tokenizer(__lowerCamelCase ).input_ids
        _snake_case = tokenizer(__lowerCamelCase ).input_ids
        self.assertEqual(__lowerCamelCase , __lowerCamelCase )

    def __UpperCAmelCase ( self : Tuple ):
        """Added plain and special tokens both appear in batch_decode output."""
        _snake_case = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
        tokenizer.add_tokens(['''!''', '''?'''] )
        tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} )
        # fmt: off
        _snake_case = [
            [1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 8, 9_8, 3_9_2, 3_9_2, 3_9_3, 3_9_2, 3_9_2, 3_9_3, 3_9_4, 3_9_4],
            [2_4, 2_2, 5, 2_4, 2_2, 5, 7_7, tokenizer.pad_token_id, 3_9_4, 3_9_4],
        ]
        # fmt: on
        _snake_case = tokenizer.batch_decode(__lowerCamelCase )
        self.assertEqual(__lowerCamelCase , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] )

    @staticmethod
    def __UpperCAmelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : str ):
        """Collect the value under `key` from every offset dict in `offsets`."""
        _snake_case = [d[key] for d in offsets]
        return retrieved_list

    def __UpperCAmelCase ( self : List[Any] ):
        """Char offsets from decode line up with the decoded text."""
        _snake_case = self.get_tokenizer(word_delimiter_token='''|''' )
        tokenizer.add_tokens('''|''' )
        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        _snake_case = [1_1, 5, 5, 5, 1_5, 1_5, tokenizer.pad_token_id, 1_5, 1_5, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 1_5, 8, 8, 8, tokenizer.word_delimiter_token_id, 9_8]
        # fmt: on
        _snake_case = tokenizer.decode(__lowerCamelCase , output_char_offsets=__lowerCamelCase , filter_word_delimiter_token=__lowerCamelCase )
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys() ) , 2 )
        self.assertTrue('''text''' in outputs )
        self.assertTrue('''char_offsets''' in outputs )
        self.assertTrue(isinstance(__lowerCamelCase , __lowerCamelCase ) )
        # check that order of chars is correct and identical for both outputs
        self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text )
        self.assertListEqual(
            self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] )
        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 1_1, 1_2, 1_5, 1_6] )
        self.assertListEqual(
            self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 1_0, 1_2, 1_5, 1_6, 1_7] )

    def __UpperCAmelCase ( self : Optional[int] ):
        """batch_decode with offsets matches per-sample decode with offsets."""
        _snake_case = self.get_tokenizer(word_delimiter_token='''|''' )
        def check_list_tuples_equal(__lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] ):
            self.assertTrue(isinstance(__lowerCamelCase , __lowerCamelCase ) )
            self.assertTrue(isinstance(outputs_list[0] , __lowerCamelCase ) )
            # transform list to ModelOutput
            _snake_case = WavaVecaPhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
            self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] )
            def recursive_check(__lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] ):
                if isinstance(__lowerCamelCase , __lowerCamelCase ):
                    [recursive_check(__lowerCamelCase , __lowerCamelCase ) for la, la in zip(__lowerCamelCase , __lowerCamelCase )]
                    self.assertEqual(__lowerCamelCase , __lowerCamelCase )
            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] )
        # fmt: off
        _snake_case = [
            [1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 4, 8, 9_8, 3_2, 3_2, 3_2, 3_2, 4, 3_3, tokenizer.word_delimiter_token_id, 3_2, 3_2, 3_3, 3_4, 3_4],
            [2_4, 2_2, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 2_4, 2_2, 2_2, 2_2, 4, 5, 7_7, tokenizer.pad_token_id, 2_2, 2_2, 4, 3_4, 3_4, 3_4, 3_4],
        ]
        # fmt: on
        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`
        # char
        _snake_case = tokenizer.batch_decode(__lowerCamelCase , output_char_offsets=__lowerCamelCase )
        _snake_case = [tokenizer.decode(__lowerCamelCase , output_char_offsets=__lowerCamelCase ) for ids in sample_ids]
        check_list_tuples_equal(__lowerCamelCase , __lowerCamelCase )

    @unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' )
    def __UpperCAmelCase ( self : List[str] ):
        """Skipped: the tokenizer lower-cases by design."""
        pass

    @unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' )
    def __UpperCAmelCase ( self : Union[str, Any] ):
        """Skipped: phonemes are always space-separated."""
        pass

    @unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' )
    def __UpperCAmelCase ( self : Dict ):
        """Skipped: encode/decode are intentionally asymmetric."""
        pass

    @unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' )
    def __UpperCAmelCase ( self : Any ):
        """Skipped: no max model length to test."""
        pass

    def __UpperCAmelCase ( self : Dict ):
        """add_tokens / add_special_tokens grow the vocab and encode correctly."""
        _snake_case = self.get_tokenizers(do_lower_case=__lowerCamelCase )
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                _snake_case = tokenizer.vocab_size
                _snake_case = len(__lowerCamelCase )
                self.assertNotEqual(__lowerCamelCase , 0 )
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                _snake_case = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
                _snake_case = tokenizer.add_tokens(__lowerCamelCase )
                _snake_case = tokenizer.vocab_size
                _snake_case = len(__lowerCamelCase )
                self.assertNotEqual(__lowerCamelCase , 0 )
                self.assertEqual(__lowerCamelCase , __lowerCamelCase )
                self.assertEqual(__lowerCamelCase , len(__lowerCamelCase ) )
                self.assertEqual(__lowerCamelCase , all_size + len(__lowerCamelCase ) )
                _snake_case = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__lowerCamelCase )
                self.assertGreaterEqual(len(__lowerCamelCase ) , 4 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                _snake_case = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
                _snake_case = tokenizer.add_special_tokens(__lowerCamelCase )
                _snake_case = tokenizer.vocab_size
                _snake_case = len(__lowerCamelCase )
                self.assertNotEqual(__lowerCamelCase , 0 )
                self.assertEqual(__lowerCamelCase , __lowerCamelCase )
                self.assertEqual(__lowerCamelCase , len(__lowerCamelCase ) )
                self.assertEqual(__lowerCamelCase , all_size_a + len(__lowerCamelCase ) )
                _snake_case = tokenizer.encode(
                    '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__lowerCamelCase )
                self.assertGreaterEqual(len(__lowerCamelCase ) , 6 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[0] , tokens[1] )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokens[-4] )
                self.assertEqual(tokens[0] , tokenizer.eos_token_id )
                self.assertEqual(tokens[-3] , tokenizer.pad_token_id )

    @unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
    def __UpperCAmelCase ( self : int ):
        """Skipped: tokenizer is decode-only for input IDs."""
        pass

    @unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
    def __UpperCAmelCase ( self : Union[str, Any] ):
        """Skipped: tokenizer is decode-only for input IDs."""
        pass

    def __UpperCAmelCase ( self : Tuple ):
        """convert_tokens_to_string returns a mapping whose `text` is a string."""
        # The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
        # is not the case for Wav2Vec2PhonemeCTCTokenizer.
        _snake_case = self.get_tokenizers(fast=__lowerCamelCase , do_lower_case=__lowerCamelCase )
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                _snake_case = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t''']
                _snake_case = tokenizer.convert_tokens_to_string(__lowerCamelCase )
                self.assertIsInstance(output['''text'''] , __lowerCamelCase )
| 103 |
"""simple docstring"""
import operator
def strand_sort(arr, reverse=False, solution=None):
    """Sort ``arr`` with strand sort and return the sorted list.

    NOTE(review): the mangled original declared three parameters all named
    ``__a`` (a SyntaxError) while its body and the module-level asserts
    reference ``arr``/``reverse``/``solution``/``strand_sort`` — names are
    restored accordingly.

    :param arr: list to sort; it is consumed (emptied) in place.
    :param reverse: sort descending when True.
    :param solution: accumulator used by the recursive merge passes.
    :return: new list with the sorted elements.
    """
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    # Pull an increasing (or decreasing) "strand" out of arr.
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    # Recurse until arr is exhausted; each pass merges one more strand.
    strand_sort(arr, reverse, solution)
    return solution


# Backward-compatible alias for the mangled name this definition carried.
A_ = strand_sort
if __name__ == "__main__":
    # Smoke checks for both sort directions (stray dataset-delimiter junk
    # that followed these lines was removed).
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
"""simple docstring"""
from math import ceil
def A__ ( UpperCamelCase = 1_001 ):
    """Project Euler 28: sum of the numbers on both diagonals of an
    ``UpperCamelCase`` x ``UpperCamelCase`` clockwise number spiral (n odd).

    The mangled original bound ``total``/``odd``/``even`` to one name ``A``,
    leaving all three undefined; distinct locals are restored.

    :param UpperCamelCase: odd side length of the spiral.
    :return: sum of both diagonals.
    """
    total = 1  # the centre cell
    for i in range(1 , int(ceil(UpperCamelCase / 2.0 ) ) ):
        odd = 2 * i + 1   # side length of ring i
        even = 2 * i
        # The four corners of ring i sum to 4*odd**2 - 6*even.
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(A__())
    else:
        try:
            # NOTE(review): the original bound the parsed argument to a
            # mangled name and then called undefined `solution(n)`; call the
            # actual function with the parsed value.
            n = int(sys.argv[1])
            print(A__(n))
        except ValueError:
            print('Invalid entry - please enter a number')
"""simple docstring"""
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
# The two mangled `_snake_case` bindings overwrote a module logger with a
# docstring config name; restore two distinct module-level names.
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = 'T5Config'
# Backward-compatible alias preserving the final value the mangled name held.
_snake_case = _CONFIG_FOR_DOC
class TFMTaModel(TFTaModel):
    """mT5 model: the TF T5 architecture driven by ``MTaConfig``.

    NOTE(review): the original header ``class _UpperCAmelCase ( lowercase_ )``
    referenced an undefined base and shared one mangled name with two other
    classes below; names restored from the T5 classes imported above.
    """

    model_type = '''mt5'''
    config_class = MTaConfig
class TFMTaForConditionalGeneration(TFTaForConditionalGeneration):
    """mT5 conditional-generation head: TF T5 generation with ``MTaConfig``.

    NOTE(review): restored from a mangled header whose base ``lowercase_``
    was undefined and whose class/attribute names collided with siblings.
    """

    model_type = '''mt5'''
    config_class = MTaConfig
class TFMTaEncoderModel(TFTaEncoderModel):
    """mT5 encoder-only model: TF T5 encoder with ``MTaConfig``.

    NOTE(review): restored from a mangled header (undefined base
    ``lowercase_``, duplicated class/attribute names); trailing dataset
    delimiter junk removed.
    """

    model_type = '''mt5'''
    config_class = MTaConfig
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
# Module-level logger; the original `Optional[int]` annotation was wrong for
# a Logger object and is dropped.
lowercase__ = logging.get_logger(__name__)
class snake_case ( BaseImageProcessor ):
    r"""
    Image processor that rescales images and symmetrically pads them to a
    multiple of ``pad_size`` (Swin2SR-style super-resolution preprocessing).

    NOTE(review): the original block was machine-mangled — undefined base
    ``__UpperCAmelCase``, every method named ``A_``, duplicate ``__lowercase``
    parameter names (a SyntaxError), and the ``pad`` locals
    ``old_height``/``old_width`` collapsed away. Identifiers are restored to
    the standard ``transformers`` image-processor API.
    """

    model_input_names = ['pixel_values']

    def __init__( self , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_pad : bool = True , pad_size : int = 8 , **kwargs ):
        """Store default rescale/pad settings.

        :param do_rescale: multiply pixel values by ``rescale_factor`` by default.
        :param rescale_factor: scale applied when rescaling (1/255 maps uint8 to [0, 1]).
        :param do_pad: pad to a multiple of ``pad_size`` by default.
        :param pad_size: the multiple each spatial dimension is padded up to.
        """
        super().__init__(**kwargs )
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale( self , image : np.ndarray , scale : float , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        """Rescale pixel values by ``scale`` (thin wrapper over the functional helper)."""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def pad( self , image : np.ndarray , size : int , data_format : Optional[Union[str, ChannelDimension]] = None ):
        """Symmetrically pad ``image`` on the bottom/right up to the next multiple of ``size``.

        Note: padding is always at least 1 pixel per side because of the
        ``// size + 1`` rounding, matching the original behavior.
        """
        old_height , old_width = get_image_size(image )
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=data_format )

    def preprocess( self , images : ImageInput , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_pad : Optional[bool] = None , pad_size : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs ):
        """Preprocess one image or a batch: rescale, pad, reorder channels, batch.

        :param images: a single image or list of images (PIL/numpy/tensor).
        :param return_tensors: framework for the returned batch (e.g. 'pt', 'np').
        :raises ValueError: on invalid image types or a missing rescale factor.
        :return: ``BatchFeature`` with key ``pixel_values``.
        """
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_pad:
            images = [self.pad(image , size=pad_size ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class snake_case :
'''simple docstring'''
def __init__( self : int , __lowercase : Dict , __lowercase : int=13 , __lowercase : str=7 , __lowercase : List[str]=True , __lowercase : Union[str, Any]=True , __lowercase : List[Any]=True , __lowercase : Optional[int]=True , __lowercase : Dict=99 , __lowercase : int=64 , __lowercase : Dict=32 , __lowercase : Optional[Any]=5 , __lowercase : Tuple=4 , __lowercase : Optional[Any]=37 , __lowercase : Dict="gelu" , __lowercase : int=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : Tuple=512 , __lowercase : List[str]=16 , __lowercase : Dict=2 , __lowercase : int=0.0_2 , __lowercase : Dict=3 , __lowercase : List[str]=4 , __lowercase : Optional[int]=None , ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = parent
__UpperCAmelCase : Tuple = batch_size
__UpperCAmelCase : Any = seq_length
__UpperCAmelCase : Dict = is_training
__UpperCAmelCase : List[str] = use_input_mask
__UpperCAmelCase : Optional[Any] = use_token_type_ids
__UpperCAmelCase : int = use_labels
__UpperCAmelCase : Optional[int] = vocab_size
__UpperCAmelCase : Any = hidden_size
__UpperCAmelCase : str = embedding_size
__UpperCAmelCase : Optional[Any] = num_hidden_layers
__UpperCAmelCase : int = num_attention_heads
__UpperCAmelCase : List[str] = intermediate_size
__UpperCAmelCase : Dict = hidden_act
__UpperCAmelCase : Any = hidden_dropout_prob
__UpperCAmelCase : List[str] = attention_probs_dropout_prob
__UpperCAmelCase : Dict = max_position_embeddings
__UpperCAmelCase : Tuple = type_vocab_size
__UpperCAmelCase : Dict = type_sequence_label_size
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : Tuple = num_labels
__UpperCAmelCase : List[str] = num_choices
__UpperCAmelCase : Optional[int] = scope
def A_ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : int = None
if self.use_input_mask:
__UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : List[Any] = None
if self.use_token_type_ids:
__UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : Dict = None
__UpperCAmelCase : str = None
__UpperCAmelCase : Dict = None
if self.use_labels:
__UpperCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase : Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A_ ( self : List[Any] ):
'''simple docstring'''
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , )
def A_ ( self : List[str] , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : List[Any] , __lowercase : List[str] , __lowercase : List[Any] , __lowercase : Dict , __lowercase : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = MobileBertModel(config=__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : List[str] = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
__UpperCAmelCase : int = model(__lowercase , token_type_ids=__lowercase )
__UpperCAmelCase : Tuple = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A_ ( self : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : Tuple , __lowercase : Tuple , __lowercase : Tuple , __lowercase : int , __lowercase : Optional[int] , __lowercase : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Any = MobileBertForMaskedLM(config=__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : Tuple = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self : Dict , __lowercase : str , __lowercase : Dict , __lowercase : Tuple , __lowercase : Dict , __lowercase : List[Any] , __lowercase : Any , __lowercase : str ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = MobileBertForNextSentencePrediction(config=__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : Tuple = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def A_ ( self : List[Any] , __lowercase : List[Any] , __lowercase : Any , __lowercase : List[str] , __lowercase : Optional[Any] , __lowercase : Optional[int] , __lowercase : int , __lowercase : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = MobileBertForPreTraining(config=__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : List[str] = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , next_sentence_label=__lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def A_ ( self : str , __lowercase : List[Any] , __lowercase : Optional[Any] , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = MobileBertForQuestionAnswering(config=__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : str = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , start_positions=__lowercase , end_positions=__lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self : Optional[Any] , __lowercase : Union[str, Any] , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : Union[str, Any] , __lowercase : List[str] , __lowercase : List[str] , __lowercase : int ):
'''simple docstring'''
__UpperCAmelCase : int = self.num_labels
__UpperCAmelCase : Any = MobileBertForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : Tuple = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self : Dict , __lowercase : int , __lowercase : Tuple , __lowercase : List[Any] , __lowercase : Tuple , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Any ):
'''simple docstring'''
__UpperCAmelCase : Tuple = self.num_labels
__UpperCAmelCase : int = MobileBertForTokenClassification(config=__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : int = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def create_and_check_mobilebert_for_multiple_choice(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Build the multiple-choice head and check the per-choice logits shape.

    NOTE(review): duplicate ``__lowercase`` parameters (SyntaxError) replaced
    with the standard names; the obfuscated model call dropped the expanded
    per-choice tensors, which are re-wired here — confirm against upstream.
    """
    config.num_choices = self.num_choices
    model = MobileBertForMultipleChoice(config=config)
    model.to(torch_device)  # NOTE(review): assumes ``torch_device`` is imported above — confirm
    model.eval()
    # Tile each (batch, seq) tensor across the choice axis: (batch, num_choices, seq).
    multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    result = model(
        multiple_choice_inputs_ids,
        attention_mask=multiple_choice_input_mask,
        token_type_ids=multiple_choice_token_type_ids,
        labels=choice_labels,
    )
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
    """Split the prepared fixtures into (config, inputs_dict) for the common tests.

    NOTE(review): the obfuscated source unpacked every element into the same
    ``__UpperCAmelCase`` name and then referenced ``input_ids`` etc., which
    were never bound (NameError); the unpack targets are restored. Method
    name follows the ModelTesterMixin convention — confirm.
    """
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
    return config, inputs_dict
@require_torch
class snake_case(__UpperCAmelCase, unittest.TestCase):
    """Common + pipeline test wiring for the MobileBERT model family.

    NOTE(review): the obfuscated source listed the same mixin base twice
    (``TypeError: duplicate base class``), so one copy is kept — restore the
    second mixin's real name. Every attribute was called ``_A`` and every
    method ``A_``, so later definitions silently shadowed earlier ones; the
    conventional names required by the test mixins / unittest are restored
    below — confirm against the upstream file.
    """

    # Model classes exercised by the shared model tests.
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline task -> model class map used by the pipeline tests.
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True  # NOTE(review): original flag name lost in obfuscation — confirm

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add dummy labels for pre-training heads when the common tests request labels."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            # NOTE(review): mapping name restored from convention — confirm import above.
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    """Wrap a nested token-id list in a ``torch.long`` tensor on the test device.

    NOTE(review): the obfuscated source named this ``lowerCamelCase_`` while
    the integration test below calls ``_long_tensor``, and passed the input
    back in as ``device=``; the conventional form is restored — confirm
    ``torch_device`` is imported above.
    """
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
lowercase__ :Dict = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    """Slow integration check against the released google/mobilebert-uncased weights.

    NOTE(review): the obfuscated source reused the class name ``snake_case``
    (shadowing the test class above) and passed undefined names to
    ``.to(...)``/``device=``; the conventional integration-test name and
    ``torch_device`` are restored — confirm against upstream.
    """

    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.473_6526e07, 8.269_1656e04, 1.652_1838e05],
                    [-5.754_1704e-01, 3.905_6022e00, 4.401_1507e00],
                    [2.604_7359e00, 1.567_7652e00, -1.732_4188e-01],
                ]
            ],
            device=torch_device,
        )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
        self.assertTrue(lower_bound and upper_bound)
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
_SCREAMING_SNAKE_CASE : List[str] = numpy.array([0, 0])
_SCREAMING_SNAKE_CASE : Tuple = numpy.array([0.5, 0.8660254])
_SCREAMING_SNAKE_CASE : Any = numpy.array([1, 0])
_SCREAMING_SNAKE_CASE : Tuple = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors, steps):
    """Apply ``iteration_step`` ``steps`` times to the initial vector list.

    NOTE(review): the obfuscated source named this ``__lowerCAmelCase`` with
    two parameters both called ``__magic_name__`` (SyntaxError); the names the
    body and the ``__main__`` block actually use are restored.
    """
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors):
    """One Koch-snowflake refinement: replace each edge by four edges with a 60-degree spike.

    NOTE(review): function/parameter names restored — the obfuscated source
    named this ``__lowerCAmelCase`` while ``iterate`` calls ``iteration_step``.
    """
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        # Spike vertex: rotate the middle third by 60 degrees.
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector, angle_in_degrees):
    """Rotate a 2-D vector counter-clockwise by ``angle_in_degrees``.

    NOTE(review): the obfuscated source declared both parameters as
    ``__magic_name__`` (SyntaxError) and named the function
    ``__lowerCAmelCase``; restored to the names ``iteration_step`` uses.
    """
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
def plot(vectors):
    """Draw the polyline through ``vectors`` with equal axis scaling (blocks on show()).

    NOTE(review): name restored from the ``__main__`` call site.
    """
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Five refinement steps of the initial triangle, then render the curve.
    # NOTE(review): the obfuscated source bound the result to
    # ``_SCREAMING_SNAKE_CASE`` but plotted ``processed_vectors`` (NameError).
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
| 206 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
_SCREAMING_SNAKE_CASE : int = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
_SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
_SCREAMING_SNAKE_CASE : str = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
_SCREAMING_SNAKE_CASE : Any = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
_SCREAMING_SNAKE_CASE : Dict = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
_SCREAMING_SNAKE_CASE : Optional[int] = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
_SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
_SCREAMING_SNAKE_CASE : int = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
_SCREAMING_SNAKE_CASE : Any = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
_SCREAMING_SNAKE_CASE : Optional[Any] = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
_SCREAMING_SNAKE_CASE : str = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
_SCREAMING_SNAKE_CASE : Any = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
_SCREAMING_SNAKE_CASE : str = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
# Concrete lazy mappings pairing each config class with its Flax model class.
# NOTE(review): the obfuscated source bound every mapping to the same rebound
# ``_SCREAMING_SNAKE_CASE`` name, leaving the ``FLAX_MODEL_*_MAPPING`` names
# used by the auto classes below undefined; the names are restored from the
# ``*_MAPPING_NAMES`` arguments (the NAMES tables above need the matching
# rename as well).
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
# Concrete FlaxAutoModel* classes: each binds a task-specific mapping and is
# then passed through ``auto_class_update`` to fill in docstrings/helpers.
# NOTE(review): the obfuscated source named every class ``A`` and discarded
# the ``auto_class_update`` result into a throwaway variable, leaving the
# ``FlaxAutoModel*`` names referenced in the update calls undefined
# (NameError at import); the names are restored from those call sites.
# ``_model_mapping`` is the attribute ``_BaseAutoModelClass`` expects — confirm.


class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeqaSeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeqaSeqLM = auto_class_update(
    FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVisionaSeq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVisionaSeq = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeqaSeq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeqaSeq = auto_class_update(
    FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 206 | 1 |
def __UpperCAmelCase(num):
    """Return True if ``num`` is a non-negative integer palindrome.

    Reverses the decimal digits arithmetically and compares with the input.
    Negative numbers are never palindromes. NOTE(review): the obfuscated
    source named the parameter ``lowercase_`` while the body read ``num``
    (NameError) — the parameter is renamed to match the body.
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
    # Run any doctest examples embedded in this module when executed directly.
    import doctest

    doctest.testmod()
| 114 |
from __future__ import annotations
def __UpperCAmelCase(
    ciphertext,
    cipher_alphabet=None,
    frequencies_dict=None,
    case_sensitive=False,
):
    """Break a Caesar cipher with a chi-squared goodness-of-fit test.

    Tries every shift over the alphabet, scores each candidate decryption
    against English letter frequencies (or ``frequencies_dict``), and returns
    ``(best_shift, chi_squared_value, decoded_text)``.

    NOTE(review): the obfuscated source declared all four parameters as
    ``lowercase_`` (SyntaxError) while the body used the real names, and the
    final unpack discarded both tuple elements so the return referenced
    undefined names (NameError); both are repaired here.
    """
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 1_23)]
    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict
    if not case_sensitive:
        ciphertext = ciphertext.lower()
    # Chi squared statistic values, keyed by candidate shift.
    chi_squared_statistic_values = {}
    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""
        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter
        chi_squared_statistic = 0.0
        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the excepcted amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
                    # Get the excepcted amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key):
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )
    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
| 114 | 1 |
import argparse
import os

import torch

from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch(gpta_checkpoint_path, gpta_config_file, pytorch_dump_folder_path):
    """Convert a TensorFlow GPT-2 checkpoint to a PyTorch weights file + config json.

    NOTE(review): the obfuscated source named this ``UpperCamelCase`` with
    three parameters all called ``lowercase_`` (SyntaxError); the name and
    parameters are restored from the call in the ``__main__`` block below.
    Path joining uses ``os.path.join`` instead of string concatenation.
    """
    # Construct model: default config unless an explicit json config was given.
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file)
    model = GPTaModel(config)
    # Load weights from numpy
    load_tf_weights_in_gpta(model, config, gpta_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    # NOTE(review): the obfuscated source bound the parser and parsed args to
    # ``lowerCamelCase_`` while reading ``parser``/``args`` (NameError), and
    # read ``args.gpta_*`` although argparse derives the attributes from the
    # ``--gpt2_*`` flags; both are repaired here.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowerCamelCase_ = pytest.mark.integration
@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    """``inspect_dataset`` copies the loading script into tmp_path without caches.

    NOTE(review): parameters restored from the parametrize id and the pytest
    ``tmp_path`` fixture — the obfuscated source declared both as
    ``lowercase_`` (SyntaxError) and named the function ``UpperCamelCase``
    (never collected by pytest).
    """
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    """``inspect_metric`` copies the metric script into tmp_path without caches."""
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    """The per-config info exposes the config name and its split names."""
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    """A missing config name for a multi-config dataset raises."""
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    """The expected config name is listed for each dataset."""
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos_no_config_name(path, expected_configs, expected_splits_in_first_config):
    """``get_dataset_infos`` returns all configs; the first has the expected splits."""
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_config, expected_splits):
    """A specific config's info exposes its name and split names."""
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    """A missing config name for a multi-config dataset raises."""
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = ViTImageProcessor if is_vision_available() else None
@property
def UpperCAmelCase_ ( self ):
    # NOTE(review): delegates to ``self.image_processor_tester``, which is not
    # defined in the visible part of this class — presumably provided by a
    # processor-tester mixin; confirm.
    '''simple docstring'''
    return self.image_processor_tester.prepare_image_processor_dict()
def setUp(self):
    """Create a temp dir holding a character vocab file and an image-processor config.

    NOTE(review): the obfuscated source named this ``UpperCAmelCase_`` (so
    unittest never ran it) and bound the temp dir / vocab to throwaway locals
    while later methods read ``self.tmpdirname`` and the write referenced an
    undefined ``snake_case__``; attributes and names are restored — confirm
    ``self.image_size`` against upstream.
    """
    self.image_size = (3, 32, 128)
    self.tmpdirname = tempfile.mkdtemp()
    # fmt: off
    vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
    # fmt: on
    vocab_tokens = dict(zip(vocab, range(len(vocab))))
    self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
    with open(self.vocab_file, "w", encoding="utf-8") as fp:
        fp.write(json.dumps(vocab_tokens) + "\n")
    image_processor_map = {
        "do_normalize": False,
        "do_resize": True,
        "image_processor_type": "ViTImageProcessor",
        "resample": 3,
        "size": {"height": 32, "width": 128},
    }
    self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
    with open(self.image_processor_file, "w", encoding="utf-8") as fp:
        json.dump(image_processor_map, fp)
def get_tokenizer(self, **kwargs):
    """Tokenizer loaded from the temp dir created in ``setUp``.

    NOTE(review): name restored from the ``self.get_tokenizer()`` call sites below.
    """
    return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_image_processor(self, **kwargs):
    """Image processor loaded from the temp dir created in ``setUp``.

    NOTE(review): name restored from the ``self.get_image_processor()`` call sites below.
    """
    return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
def UpperCAmelCase_ ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
lowercase__ : Union[str, Any]= Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) )
return image_input
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.get_tokenizer()
lowercase__ : Dict= self.get_image_processor()
lowercase__ : Dict= MgpstrProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
processor.save_pretrained(self.tmpdirname )
lowercase__ : int= MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case__ )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , snake_case__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= self.get_tokenizer()
lowercase__ : Tuple= self.get_image_processor()
lowercase__ : List[Any]= MgpstrProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
processor.save_pretrained(self.tmpdirname )
lowercase__ : Tuple= self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowercase__ : int= self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 )
lowercase__ : Tuple= MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case__ , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , snake_case__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= self.get_image_processor()
lowercase__ : Tuple= self.get_tokenizer()
lowercase__ : Any= MgpstrProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowercase__ : str= self.prepare_image_inputs()
lowercase__ : Dict= image_processor(snake_case__ , return_tensors="np" )
lowercase__ : List[str]= processor(images=snake_case__ , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= self.get_image_processor()
lowercase__ : Any= self.get_tokenizer()
lowercase__ : List[Any]= MgpstrProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowercase__ : List[str]= "test"
lowercase__ : int= processor(text=snake_case__ )
lowercase__ : List[Any]= tokenizer(snake_case__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.get_image_processor()
lowercase__ : Any= self.get_tokenizer()
lowercase__ : Union[str, Any]= MgpstrProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowercase__ : Optional[int]= "test"
lowercase__ : Any= self.prepare_image_inputs()
lowercase__ : Dict= processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "labels"] )
# test if it raises when no input is passed
with pytest.raises(snake_case__ ):
processor()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.get_image_processor()
lowercase__ : int= self.get_tokenizer()
lowercase__ : Union[str, Any]= MgpstrProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowercase__ : Tuple= [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ : Tuple= processor.char_decode(snake_case__ )
lowercase__ : List[Any]= tokenizer.batch_decode(snake_case__ )
lowercase__ : Tuple= [seq.replace(" " , "" ) for seq in decoded_tok]
self.assertListEqual(snake_case__ , snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= self.get_image_processor()
lowercase__ : List[str]= self.get_tokenizer()
lowercase__ : Dict= MgpstrProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowercase__ : List[Any]= None
lowercase__ : Union[str, Any]= self.prepare_image_inputs()
lowercase__ : int= processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= self.get_image_processor()
lowercase__ : Dict= self.get_tokenizer()
lowercase__ : str= MgpstrProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowercase__ : Optional[int]= torch.randn(1 , 27 , 38 )
lowercase__ : Tuple= torch.randn(1 , 27 , 50257 )
lowercase__ : Optional[Any]= torch.randn(1 , 27 , 30522 )
lowercase__ : Tuple= processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] )
| 218 |
"""Project Euler problem 551: https://projecteuler.net/problem=551

The sequence a(1) = 1, a(n+1) = a(n) + digitsum(a(n)) begins
1, 2, 4, 8, 16, 23, 28, 38, 49, ...  Find a(10**15).
"""
# k values for which terms are decomposed as a(i) = b * 10**k + c during the search.
# Fixed: these three constants were all bound to the same name `a`, while the
# rest of the module refers to `ks`, `base` and `memo` (NameError at runtime).
ks = range(2, 20 + 1)
# Powers of ten: base[k] == 10**k, used to split/recombine little-endian digit arrays.
base = [10**k for k in range(ks[-1] + 1)]
# memo[digitsum(b)][c] -> cached (diff, dn, k) "jumps", kept sorted by dn.
memo: dict[int, dict[int, list[list[int]]]] = {}
def next_term(a_i, k, i, n):
    """Advance the digit array ``a_i`` (least-significant digit first) from term
    ``i`` toward term ``n`` of the sequence a(i+1) = a(i) + digitsum(a(i)).

    Terms are viewed as a(i) = b * 10**k + c.  While digitsum(b) stays fixed,
    the increase between terms depends only on (digitsum(b), c), so previously
    seen increases are cached in ``memo`` as (diff, dn, k) "jumps" and replayed.

    Fixed: the function was defined under a mangled name with four duplicate
    parameters named ``A`` (a SyntaxError); the call sites (including the
    recursive one) already use ``next_term(a_i, k, i, n)``.

    Returns:
        (diff, dn): total value added to the starting term and the number of
        terms advanced (dn <= n - i).
    """
    # Split a(i) = b * 10**k + c: ds_b is digitsum(b), c is the low-k-digit part.
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _jump_k = jumps[max_jump]

                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    # done, or c has overflowed past 10**k so digitsum(b) is about to change
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by number of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value of digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)
def compute(a_i, k, i, n):
    """Sequentially advance ``a_i`` from term ``i`` toward term ``n`` without
    memoization, stopping early once a carry propagates past digit ``k``
    (i.e. the low part c overflowed 10**k, which changes digitsum(b)).

    Fixed: defined under a mangled name with duplicate parameters named ``A``
    (a SyntaxError); the call site in ``next_term`` uses ``compute(a_i, k, i, n)``.

    Returns:
        (diff, terms_advanced): total value added and number of terms computed.
    """
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i represents b * 10**k + c
    # ds_b -> digitsum(b), ds_c -> digitsum(c), tracked incrementally
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        # add the digit sum into the low k digits, propagating the carry
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        # carry escaped past digit k: c overflowed, caller must re-split
        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, position, addend):
    """Add ``addend * 10**position`` to the little-endian digit array ``digits``
    in place, propagating carries and appending new high-order digits as needed.

    Fixed: defined under a mangled name with duplicate parameters named ``A``
    (a SyntaxError); both call sites in this module invoke ``add(...)``.

    NOTE(review): for positions that already exist in ``digits`` the carry
    arithmetic appears to assume ``addend < 10``; both call sites in this
    module satisfy that.
    """
    for j in range(position, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    # whatever carry remains becomes new high-order digits
    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    """Return a(n) for the sequence a(1) = 1, a(i+1) = a(i) + digitsum(a(i)).

    Fixed: the function was defined under a mangled name, but the
    ``__main__`` guard below calls ``solution()``.

    >>> solution(10)
    62
    """
    digits = [1]
    i = 1
    dn = 0
    while True:
        # jump as far as the memoized differences allow, at most to term n
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    # reassemble the integer from its little-endian digit array
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
    # Print the result for the default argument (n = 10**15).
    print(F"""{solution() = }""")
| 218 | 1 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
    """Config-tester subclass that additionally checks Levit-specific attributes."""

    def SCREAMING_SNAKE_CASE_ (self : int) ->int:
        """Instantiate the config under test and assert Levit attributes exist."""
        # Fixed: the config instance was assigned to one mangled name
        # (`lowerCamelCase__`) but read back under another (`UpperCAmelCase_`),
        # which raised NameError as soon as the check ran.
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config , "hidden_sizes"))
        self.parent.assertTrue(hasattr(config , "num_attention_heads"))
class _SCREAMING_SNAKE_CASE :
    """Builds small Levit configs and random inputs for the model tests.

    NOTE(review): identifiers are machine-mangled and this class is not
    runnable as-is — ``__init__`` declares every parameter as
    ``UpperCAmelCase_`` (duplicate parameter names are a SyntaxError) while
    the body reads the intended names (``parent``, ``batch_size``, ...), and
    locals are assigned as ``lowerCamelCase__`` but read under other names.
    The original identifiers must be restored before this file can be imported.
    """

    def __init__(self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any]=13 , UpperCAmelCase_ : Tuple=64 , UpperCAmelCase_ : Optional[int]=3 , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : Union[str, Any]=1 , UpperCAmelCase_ : List[str]=16 , UpperCAmelCase_ : Any=[128, 256, 384] , UpperCAmelCase_ : Dict=[4, 6, 8] , UpperCAmelCase_ : Union[str, Any]=[2, 3, 4] , UpperCAmelCase_ : Tuple=[16, 16, 16] , UpperCAmelCase_ : Union[str, Any]=0 , UpperCAmelCase_ : int=[2, 2, 2] , UpperCAmelCase_ : str=[2, 2, 2] , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : str=2 , ) ->List[str]:
        """Store the tester hyper-parameters (batch size, image size, Levit dims, ...)."""
        lowerCamelCase__: Tuple =parent
        lowerCamelCase__: Optional[Any] =batch_size
        lowerCamelCase__: Union[str, Any] =image_size
        lowerCamelCase__: Dict =num_channels
        lowerCamelCase__: List[Any] =kernel_size
        lowerCamelCase__: List[str] =stride
        lowerCamelCase__: str =padding
        lowerCamelCase__: Union[str, Any] =hidden_sizes
        lowerCamelCase__: Optional[int] =num_attention_heads
        lowerCamelCase__: List[str] =depths
        lowerCamelCase__: Any =key_dim
        lowerCamelCase__: Any =drop_path_rate
        lowerCamelCase__: List[str] =patch_size
        lowerCamelCase__: Optional[Any] =attention_ratio
        lowerCamelCase__: str =mlp_ratio
        lowerCamelCase__: Tuple =initializer_range
        # two Subsample stages between the three Levit blocks
        lowerCamelCase__: Optional[int] =[
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        lowerCamelCase__: Optional[int] =is_training
        lowerCamelCase__: List[str] =use_labels
        lowerCamelCase__: List[Any] =num_labels
        lowerCamelCase__: Any =initializer_range

    def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Dict:
        """prepare_config_and_inputs: random pixel values, optional labels, and a config."""
        lowerCamelCase__: int =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        lowerCamelCase__: str =None
        if self.use_labels:
            lowerCamelCase__: Any =ids_tensor([self.batch_size] , self.num_labels)
        lowerCamelCase__: List[str] =self.get_config()
        return config, pixel_values, labels

    def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Tuple:
        """get_config: build a LevitConfig from the stored hyper-parameters."""
        return LevitConfig(
            image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )

    def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int]) ->Any:
        """Run LevitModel and check the last_hidden_state shape against the
        spatial size implied by four stride-2 convolutions."""
        lowerCamelCase__: Tuple =LevitModel(config=UpperCAmelCase_)
        model.to(UpperCAmelCase_)
        model.eval()
        lowerCamelCase__: Union[str, Any] =model(UpperCAmelCase_)
        lowerCamelCase__: Optional[Any] =(self.image_size, self.image_size)
        lowerCamelCase__ , lowerCamelCase__: List[str] =image_size[0], image_size[1]
        for _ in range(4):
            lowerCamelCase__: List[Any] =floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            lowerCamelCase__: Optional[Any] =floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]) , )

    def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any]) ->Any:
        """Run LevitForImageClassification and check the logits shape."""
        lowerCamelCase__: str =self.num_labels
        lowerCamelCase__: Dict =LevitForImageClassification(UpperCAmelCase_)
        model.to(UpperCAmelCase_)
        model.eval()
        lowerCamelCase__: str =model(UpperCAmelCase_ , labels=UpperCAmelCase_)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))

    def SCREAMING_SNAKE_CASE_ (self : str) ->Optional[int]:
        """prepare_config_and_inputs_for_common: config plus a pixel_values inputs dict."""
        lowerCamelCase__: List[Any] =self.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Tuple =config_and_inputs
        lowerCamelCase__: Optional[int] ={"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Common model-test suite for Levit (config, shapes, training, checkpointing).

    NOTE(review): identifiers are machine-mangled and this class is not
    runnable as-is:
      * the same base name appears twice in the bases list (``TypeError:
        duplicate base class`` at class-creation time) — presumably the
        imported ``ModelTesterMixin`` and ``PipelineTesterMixin`` originally;
      * every class attribute is bound as ``lowercase_`` (each rebinding the
        previous one) although methods read ``self.all_model_classes`` etc.;
      * every test method shares the name ``SCREAMING_SNAKE_CASE_`` (later
        defs shadow earlier ones and unittest will not discover any of them);
      * locals are assigned as ``lowerCamelCase__`` but read under their
        intended names, and most call arguments were replaced by
        ``UpperCAmelCase_``.
    The original identifiers must be restored before this suite can run.
    """

    lowercase_ = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    lowercase_ = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    lowercase_ = False
    lowercase_ = False
    lowercase_ = False
    lowercase_ = False
    lowercase_ = False

    def SCREAMING_SNAKE_CASE_ (self : Any) ->Tuple:
        """setUp: build the Levit model tester and a ConfigTester."""
        lowerCamelCase__: Union[str, Any] =LevitModelTester(self)
        lowerCamelCase__: List[Any] =ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37)

    def SCREAMING_SNAKE_CASE_ (self : int) ->Union[str, Any]:
        """Run the standard config round-trip checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def SCREAMING_SNAKE_CASE_ (self : Any) ->int:
        """Intentionally empty override (does nothing)."""
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[str]:
        """Skipped: Levit has no inputs_embeds."""
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->int:
        """Skipped: Levit has no input/output embeddings."""
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Union[str, Any]:
        """Skipped: Levit does not return attentions."""
        pass

    def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[int]:
        """forward() signature must start with `pixel_values`."""
        lowerCamelCase__ , lowerCamelCase__: int =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__: Tuple =model_class(UpperCAmelCase_)
            lowerCamelCase__: Optional[int] =inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__: Any =[*signature.parameters.keys()]
            lowerCamelCase__: str =["pixel_values"]
            self.assertListEqual(arg_names[:1] , UpperCAmelCase_)

    def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->List[str]:
        """hidden_states output: len(depths)+1 entries and correct first-stage shape."""

        def check_hidden_states_output(UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any]):
            # run the model and compare the hidden-state count/shape with the
            # spatial size implied by four stride-2 convolutions
            lowerCamelCase__: List[str] =model_class(UpperCAmelCase_)
            model.to(UpperCAmelCase_)
            model.eval()
            with torch.no_grad():
                lowerCamelCase__: Any =model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
            lowerCamelCase__: Dict =outputs.hidden_states
            lowerCamelCase__: Optional[int] =len(self.model_tester.depths) + 1
            self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_)
            lowerCamelCase__: Optional[Any] =(self.model_tester.image_size, self.model_tester.image_size)
            lowerCamelCase__ , lowerCamelCase__: Union[str, Any] =image_size[0], image_size[1]
            for _ in range(4):
                lowerCamelCase__: Optional[int] =floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1)
                lowerCamelCase__: List[Any] =floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]) , [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ] , )

        lowerCamelCase__ , lowerCamelCase__: Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__: List[str] =True
            check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase__: List[Any] =True
            check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def SCREAMING_SNAKE_CASE_ (self : List[str]) ->int:
        """Skipped pending a smaller common-test model."""
        pass

    def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str]=False) ->str:
        """_prepare_for_class override: the with-teacher model takes no labels."""
        lowerCamelCase__: Optional[int] =super()._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict

    def SCREAMING_SNAKE_CASE_ (self : int) ->Tuple:
        """test_model: base LevitModel forward shape check."""
        lowerCamelCase__: Optional[int] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase_)

    def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Union[str, Any]:
        """test_for_image_classification: classification-head shape check."""
        lowerCamelCase__: str =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_)

    def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[int]:
        """test_training: a backward pass runs for each trainable model class."""
        if not self.model_tester.is_training:
            return
        lowerCamelCase__ , lowerCamelCase__: Tuple =self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__: int =True
        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(UpperCAmelCase_)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            lowerCamelCase__: Optional[int] =model_class(UpperCAmelCase_)
            model.to(UpperCAmelCase_)
            model.train()
            lowerCamelCase__: Tuple =self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
            lowerCamelCase__: List[Any] =model(**UpperCAmelCase_).loss
            loss.backward()

    def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->str:
        """test_training_gradient_checkpointing: backward pass with checkpointing enabled."""
        lowerCamelCase__ , lowerCamelCase__: int =self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        lowerCamelCase__: Optional[Any] =False
        lowerCamelCase__: Optional[Any] =True
        for model_class in self.all_model_classes:
            if model_class in get_values(UpperCAmelCase_) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            lowerCamelCase__: Dict =model_class(UpperCAmelCase_)
            model.gradient_checkpointing_enable()
            model.to(UpperCAmelCase_)
            model.train()
            lowerCamelCase__: int =self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
            lowerCamelCase__: str =model(**UpperCAmelCase_).loss
            loss.backward()

    def SCREAMING_SNAKE_CASE_ (self : Dict) ->Tuple:
        """test_problem_types: multi/single-label and regression losses run cleanly."""
        lowerCamelCase__ , lowerCamelCase__: List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__: str =[
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(UpperCAmelCase_),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}"""):
                    lowerCamelCase__: int =problem_type["title"]
                    lowerCamelCase__: Optional[int] =problem_type["num_labels"]
                    lowerCamelCase__: Any =model_class(UpperCAmelCase_)
                    model.to(UpperCAmelCase_)
                    model.train()
                    lowerCamelCase__: Optional[Any] =self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
                    if problem_type["num_labels"] > 1:
                        lowerCamelCase__: str =inputs["labels"].unsqueeze(1).repeat(1 , problem_type["num_labels"])
                    lowerCamelCase__: List[Any] =inputs["labels"].to(problem_type["dtype"])
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=UpperCAmelCase_) as warning_list:
                        lowerCamelCase__: Tuple =model(**UpperCAmelCase_).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                F"""Something is going wrong in the regression problem: intercepted {w.message}""")
                    loss.backward()

    @slow
    def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->int:
        """test_model_from_pretrained: the first pretrained checkpoint loads."""
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__: Any =LevitModel.from_pretrained(UpperCAmelCase_)
            self.assertIsNotNone(UpperCAmelCase_)
def prepare_img() -> Union[str, Any]:
    """Load the COCO sample image used by the slow integration test below.

    Fixed two defects: the helper was defined under a mangled name although the
    only visible call site invokes ``prepare_img()``, and the opened image was
    bound to a mangled local while the unbound name ``image`` was returned
    (NameError).
    """
    # NOTE(review): `Union` must be importable at module level for this
    # annotation to evaluate — confirm the typing imports exist above.
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration test: run the pretrained Levit teacher model on a sample image.

    NOTE(review): identifiers are machine-mangled — locals are assigned as
    ``lowerCamelCase__`` but read under their intended names (``model``,
    ``image_processor``, ``outputs``, ...), and most call arguments were
    replaced by ``UpperCAmelCase_``; also confirm a module-level helper named
    ``prepare_img`` exists, since it is called below.  Restore the original
    identifiers before running.
    """

    @cached_property
    def SCREAMING_SNAKE_CASE_ (self : int) ->Any:
        """Default image processor loaded from the first pretrained Levit checkpoint."""
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Union[str, Any]:
        """Forward pass on a real image; check the logits shape and first three values."""
        lowerCamelCase__: str =LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            UpperCAmelCase_)
        lowerCamelCase__: str =self.default_image_processor
        lowerCamelCase__: List[Any] =prepare_img()
        lowerCamelCase__: str =image_processor(images=UpperCAmelCase_ , return_tensors="pt").to(UpperCAmelCase_)
        # forward pass
        with torch.no_grad():
            lowerCamelCase__: List[str] =model(**UpperCAmelCase_)
        # verify the logits
        lowerCamelCase__: Dict =torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
        lowerCamelCase__: Union[str, Any] =torch.tensor([1.0448, -0.3745, -1.8317]).to(UpperCAmelCase_)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1E-4))
| 437 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
# Module-level logger for this image-processor module.
__A = logging.get_logger(__name__)
def lowerCAmelCase_ ( videos ) -> List[List[ImageInput]]:
    """Normalize ``videos`` into batched form: a list of videos, each a list of frames.

    Accepts a single image, a flat list/tuple of frames, or a list of lists of
    frames, and always returns the doubly-nested form.

    Fixed: the parameter was named ``__a`` while the whole body referenced
    ``videos`` (NameError on every call).

    Raises:
        ValueError: if the input matches none of the accepted shapes.
    """
    # already batched: a list of videos whose first element is a list of frames
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos

    # a single video given as a flat list/tuple of frames
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]

    # a single frame
    elif is_valid_image(videos ):
        return [[videos]]

    raise ValueError(F"""Could not make batched video from {videos}""" )
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["pixel_values"]
def __init__(self : Tuple , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 255 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase_ : List[Any] , ) ->None:
'''simple docstring'''
super().__init__(**UpperCAmelCase_)
lowerCamelCase__: Optional[int] =size if size is not None else {"shortest_edge": 256}
lowerCamelCase__: Any =get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
lowerCamelCase__: Tuple =crop_size if crop_size is not None else {"height": 224, "width": 224}
lowerCamelCase__: Any =get_size_dict(UpperCAmelCase_ , param_name="crop_size")
lowerCamelCase__: str =do_resize
lowerCamelCase__: Any =size
lowerCamelCase__: Any =do_center_crop
lowerCamelCase__: int =crop_size
lowerCamelCase__: int =resample
lowerCamelCase__: Optional[int] =do_rescale
lowerCamelCase__: int =rescale_factor
lowerCamelCase__: Dict =offset
lowerCamelCase__: List[Any] =do_normalize
lowerCamelCase__: Optional[int] =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCamelCase__: List[str] =image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : List[Any] , ) ->np.ndarray:
'''simple docstring'''
lowerCamelCase__: Dict =get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
if "shortest_edge" in size:
lowerCamelCase__: Union[str, Any] =get_resize_output_image_size(UpperCAmelCase_ , size["shortest_edge"] , default_to_square=UpperCAmelCase_)
elif "height" in size and "width" in size:
lowerCamelCase__: int =(size["height"], size["width"])
else:
raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""")
return resize(UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : List[Any] , ) ->np.ndarray:
'''simple docstring'''
lowerCamelCase__: List[str] =get_size_dict(UpperCAmelCase_)
if "height" not in size or "width" not in size:
raise ValueError(F"""Size must have 'height' and 'width' as keys. Got {size.keys()}""")
return center_crop(UpperCAmelCase_ , size=(size["height"], size["width"]) , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[int, float] , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Union[str, Any] , ) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: str =image.astype(np.floataa)
if offset:
lowerCamelCase__: List[Any] =image - (scale / 2)
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Union[str, Any] , ) ->np.ndarray:
'''simple docstring'''
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : float = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , ) ->np.ndarray:
'''simple docstring'''
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True.")
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True.")
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True.")
# All transformations expect numpy arrays.
lowerCamelCase__: List[str] =to_numpy_array(UpperCAmelCase_)
if do_resize:
lowerCamelCase__: Optional[Any] =self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_)
if do_center_crop:
lowerCamelCase__: str =self.center_crop(UpperCAmelCase_ , size=UpperCAmelCase_)
if do_rescale:
lowerCamelCase__: List[str] =self.rescale(image=UpperCAmelCase_ , scale=UpperCAmelCase_ , offset=UpperCAmelCase_)
if do_normalize:
lowerCamelCase__: Dict =self.normalize(image=UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_)
lowerCamelCase__: Any =to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_)
return image
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : float = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase_ : List[str] , ) ->PIL.Image.Image:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =do_resize if do_resize is not None else self.do_resize
lowerCamelCase__: Optional[int] =resample if resample is not None else self.resample
lowerCamelCase__: Optional[int] =do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase__: List[str] =do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase__: Dict =rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase__: Union[str, Any] =offset if offset is not None else self.offset
lowerCamelCase__: Dict =do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase__: Optional[int] =image_mean if image_mean is not None else self.image_mean
lowerCamelCase__: List[Any] =image_std if image_std is not None else self.image_std
lowerCamelCase__: List[Any] =size if size is not None else self.size
lowerCamelCase__: Optional[int] =get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
lowerCamelCase__: str =crop_size if crop_size is not None else self.crop_size
lowerCamelCase__: Optional[Any] =get_size_dict(UpperCAmelCase_ , param_name="crop_size")
if not valid_images(UpperCAmelCase_):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
lowerCamelCase__: str =make_batched(UpperCAmelCase_)
lowerCamelCase__: Any =[
[
self._preprocess_image(
image=UpperCAmelCase_ , do_resize=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , do_center_crop=UpperCAmelCase_ , crop_size=UpperCAmelCase_ , do_rescale=UpperCAmelCase_ , rescale_factor=UpperCAmelCase_ , offset=UpperCAmelCase_ , do_normalize=UpperCAmelCase_ , image_mean=UpperCAmelCase_ , image_std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , )
for img in video
]
for video in videos
]
lowerCamelCase__: int ={"pixel_values": videos}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_)
| 437 | 1 |
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
A__ : Any = """src/transformers"""
A__ : Union[str, Any] = """docs/source/en/tasks"""
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] ) -> List[str]:
with open(UpperCAmelCase_ , 'r' , encoding='utf-8' , newline='\n' ) as f:
__lowerCamelCase : Optional[int] = f.readlines()
# Find the start prompt.
__lowerCamelCase : str = 0
while not lines[start_index].startswith(UpperCAmelCase_ ):
start_index += 1
start_index += 1
__lowerCamelCase : List[str] = start_index
while not lines[end_index].startswith(UpperCAmelCase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
A__ : List[Any] = direct_transformers_import(TRANSFORMERS_PATH)
A__ : int = {
"""asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"""audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"""language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"""image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"""masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"""multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"""object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"""question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"""semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"""sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"""summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"""translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"""document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"""monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
A__ : List[Any] = {
"""summarization.md""": ("""nllb""",),
"""translation.md""": ("""nllb""",),
}
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] ) -> List[str]:
__lowerCamelCase : Dict = TASK_GUIDE_TO_MODELS[task_guide]
__lowerCamelCase : Tuple = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(UpperCAmelCase_ , set() )
__lowerCamelCase : str = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F'[{name}](../model_doc/{code})' for code, name in model_names.items()] ) + "\n"
def UpperCAmelCase__ ( UpperCAmelCase_ : str , UpperCAmelCase_ : Dict=False ) -> Any:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : int = _find_text_in_file(
filename=os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , )
__lowerCamelCase : Optional[Any] = get_model_list_for_task(UpperCAmelCase_ )
if current_list != new_list:
if overwrite:
with open(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'
' to fix this.' )
if __name__ == "__main__":
A__ : Any = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
A__ : Optional[Any] = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 13 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__a : Dict = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class UpperCAmelCase( snake_case_ ):
"""simple docstring"""
a : bool = field(default=snake_case_ , metadata={"""help""": """Whether to use SortishSampler or not."""} )
a : bool = field(
default=snake_case_ , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
a : Optional[int] = field(
default=snake_case_ , metadata={
"""help""": (
"""The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `max_length` value of the model configuration."""
)
} , )
a : Optional[int] = field(
default=snake_case_ , metadata={
"""help""": (
"""The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `num_beams` value of the model configuration."""
)
} , )
a : Optional[Union[str, Path, GenerationConfig]] = field(
default=snake_case_ , metadata={
"""help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."""
} , )
def __a ( self ) -> int:
"""simple docstring"""
lowercase__ : List[str] = super().to_dict()
for k, v in d.items():
if isinstance(lowerCamelCase , lowerCamelCase ):
lowercase__ : Union[str, Any] = v.to_dict()
return d | 397 | 0 |
from __future__ import annotations
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : List[str] = sorted(numsa + numsa )
UpperCAmelCase_ : Dict = divmod(len(__UpperCamelCase ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase : int = [float(x) for x in input('Enter the elements of first array: ').split()]
__UpperCamelCase : Dict = [float(x) for x in input('Enter the elements of second array: ').split()]
print(F'The median of two arrays is: {median_of_two_arrays(array_a, array_a)}') | 721 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : int , __snake_case : List[Any] , __snake_case : List[Any]=13 , __snake_case : str=7 , __snake_case : Union[str, Any]=True , __snake_case : List[Any]=True , __snake_case : str=True , __snake_case : Optional[int]=True , __snake_case : Optional[int]=True , __snake_case : List[str]=False , __snake_case : List[str]=False , __snake_case : Tuple=False , __snake_case : List[str]=2 , __snake_case : Optional[int]=99 , __snake_case : Tuple=0 , __snake_case : int=32 , __snake_case : Optional[int]=5 , __snake_case : str=4 , __snake_case : str=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : List[str]=512 , __snake_case : Tuple=2 , __snake_case : List[Any]=0.02 , __snake_case : Any=2 , __snake_case : Optional[int]=4 , __snake_case : Optional[Any]="last" , __snake_case : Dict=True , __snake_case : Any=None , __snake_case : str=0 , ):
'''simple docstring'''
UpperCAmelCase_ : int = parent
UpperCAmelCase_ : Optional[Any] = batch_size
UpperCAmelCase_ : Union[str, Any] = seq_length
UpperCAmelCase_ : List[Any] = is_training
UpperCAmelCase_ : List[Any] = use_input_lengths
UpperCAmelCase_ : Dict = use_token_type_ids
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : int = gelu_activation
UpperCAmelCase_ : str = sinusoidal_embeddings
UpperCAmelCase_ : List[str] = causal
UpperCAmelCase_ : Tuple = asm
UpperCAmelCase_ : List[Any] = n_langs
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : Any = n_special
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : Any = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : Tuple = num_labels
UpperCAmelCase_ : List[Any] = num_choices
UpperCAmelCase_ : Any = summary_type
UpperCAmelCase_ : Optional[int] = use_proj
UpperCAmelCase_ : List[str] = scope
UpperCAmelCase_ : List[str] = bos_token_id
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Tuple = None
if self.use_input_lengths:
UpperCAmelCase_ : List[str] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCAmelCase_ : int = None
if self.use_token_type_ids:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size] , 2 ).float()
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : int , __snake_case : int , __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Dict , ):
'''simple docstring'''
UpperCAmelCase_ : Any = XLMModel(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[int] = model(__snake_case , lengths=__snake_case , langs=__snake_case )
UpperCAmelCase_ : Any = model(__snake_case , langs=__snake_case )
UpperCAmelCase_ : Any = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : str , __snake_case : Dict , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : str , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : Any = XLMWithLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : Optional[int] , __snake_case : Tuple , __snake_case : Any , __snake_case : Tuple , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : str , __snake_case : List[str] , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = XLMForQuestionAnsweringSimple(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[int] = model(__snake_case )
UpperCAmelCase_ : Any = model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
UpperCAmelCase_ : Optional[Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Tuple , __snake_case : str , __snake_case : Tuple , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = XLMForQuestionAnswering(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
UpperCAmelCase_ : List[str] = model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , p_mask=__snake_case , )
UpperCAmelCase_ : Optional[Any] = model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , )
((UpperCAmelCase_) , ) : Union[str, Any] = result_with_labels.to_tuple()
UpperCAmelCase_ : Optional[int] = model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
((UpperCAmelCase_) , ) : str = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _lowerCamelCase ( self : str , __snake_case : Dict , __snake_case : str , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : Any , ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = XLMForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
UpperCAmelCase_ : Optional[int] = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self : Any , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.num_labels
UpperCAmelCase_ : Optional[int] = XLMForTokenClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : List[str] = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self : List[Any] , __snake_case : str , __snake_case : List[str] , __snake_case : Any , __snake_case : Tuple , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Dict , __snake_case : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase_ : int = self.num_choices
UpperCAmelCase_ : int = XLMForMultipleChoice(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : Any = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) : Union[str, Any] = config_and_inputs
UpperCAmelCase_ : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class lowerCAmelCase__( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : Dict = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
A_ : List[str] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
A_ : Optional[int] = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCamelCase ( self : str , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : Any , __snake_case : List[Any] , __snake_case : str=False ):
'''simple docstring'''
UpperCAmelCase_ : int = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
UpperCAmelCase_ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
UpperCAmelCase_ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
return inputs_dict
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = XLMModelTester(self )
UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=__snake_case , emb_dim=37 )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*__snake_case )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*__snake_case )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*__snake_case )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*__snake_case )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*__snake_case )
def _lowerCamelCase ( self : str , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : str , __snake_case : Optional[int] , __snake_case : Union[str, Any]=False , __snake_case : Optional[Any]=1 ):
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_attentions in attentions] , [True] * len(__snake_case ) )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__snake_case ):
# adds PAD dummy token
UpperCAmelCase_ : Dict = min_length + idx + 1
UpperCAmelCase_ : List[Any] = min_length + idx + 1
UpperCAmelCase_ : Optional[int] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__snake_case ) )
def _lowerCamelCase ( self : List[Any] , __snake_case : List[str] , __snake_case : int , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : Any , __snake_case : Optional[Any]=False , __snake_case : str=1 ):
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_hidden_states in hidden_states] , [True] * len(__snake_case ) , )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__snake_case ):
# adds PAD dummy token
UpperCAmelCase_ : str = min_length + idx + 1
UpperCAmelCase_ : int = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__snake_case ) , )
pass
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Any = XLMModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(__snake_case )
UpperCAmelCase_ : str = torch.tensor([[14, 447]] , dtype=torch.long , device=__snake_case ) # the president
UpperCAmelCase_ : Union[str, Any] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
UpperCAmelCase_ : Dict = model.generate(__snake_case , do_sample=__snake_case )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __snake_case ) | 641 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class A__ ( unittest.TestCase ):
    # Pipeline tests for text generation over the causal-LM model mappings
    # (PyTorch and TensorFlow variants of the same auto-model mapping).
    #
    # NOTE(review): both assignments below bind the same attribute name
    # `lowercase`, so the second (TF mapping) silently shadows the first —
    # presumably these were meant to be two distinct attributes (e.g.
    # model_mapping / tf_model_mapping); confirm against the pipeline-test
    # mixin that consumes them.
    lowercase = MODEL_FOR_CAUSAL_LM_MAPPING
    lowercase = TF_MODEL_FOR_CAUSAL_LM_MAPPING
    @require_torch
    def snake_case_ ( self ) -> int:
        '''PyTorch smoke test: run the text-generation pipeline on a tiny CTRL
        checkpoint and pin its deterministic text / token-id outputs.

        NOTE(review): the body reads `text_generator` and `UpperCamelCase__`,
        neither of which is bound in this scope — every assignment above
        targets `A_`. As written this raises NameError; the locals (pipeline
        object, call outputs, boolean flags such as `do_sample=False`) look
        like they were clobbered by a mechanical rename and need restoring.
        Verify against the upstream transformers pipeline tests.
        '''
        A_ = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""pt""" )
        # Using `do_sample=False` to force deterministic output
        A_ = text_generator("""This is a test""" , do_sample=UpperCamelCase__ )
        # Single-prompt call: expect one dict carrying the full generated text.
        self.assertEqual(
            UpperCamelCase__ , [
                {
                    """generated_text""": (
                        """This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
                        """ oscope. FiliFili@@"""
                    )
                }
            ] , )
        # Batched prompts: expect one list of generations per input prompt.
        A_ = text_generator(["""This is a test""", """This is a second test"""] )
        self.assertEqual(
            UpperCamelCase__ , [
                [
                    {
                        """generated_text""": (
                            """This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
                            """ oscope. FiliFili@@"""
                        )
                    }
                ],
                [
                    {
                        """generated_text""": (
                            """This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"""
                            """ oscope. oscope. FiliFili@@"""
                        )
                    }
                ],
            ] , )
        # return_tensors mode: outputs are token-id sequences, not decoded text,
        # so only the shape of the result (ANY token ids) is asserted.
        A_ = text_generator("""This is a test""" , do_sample=UpperCamelCase__ , num_return_sequences=2 , return_tensors=UpperCamelCase__ )
        self.assertEqual(
            UpperCamelCase__ , [
                {"""generated_token_ids""": ANY(UpperCamelCase__ )},
                {"""generated_token_ids""": ANY(UpperCamelCase__ )},
            ] , )
        # Configure a pad token so batched generation can pad the shorter prompt.
        A_ = text_generator.model.config.eos_token_id
        A_ = """<pad>"""
        A_ = text_generator(
            ["""This is a test""", """This is a second test"""] , do_sample=UpperCamelCase__ , num_return_sequences=2 , batch_size=2 , return_tensors=UpperCamelCase__ , )
        self.assertEqual(
            UpperCamelCase__ , [
                [
                    {"""generated_token_ids""": ANY(UpperCamelCase__ )},
                    {"""generated_token_ids""": ANY(UpperCamelCase__ )},
                ],
                [
                    {"""generated_token_ids""": ANY(UpperCamelCase__ )},
                    {"""generated_token_ids""": ANY(UpperCamelCase__ )},
                ],
            ] , )
    @require_tf
    def snake_case_ ( self ) -> Dict:
        '''TensorFlow smoke test: run the text-generation pipeline on a tiny
        CTRL checkpoint and pin its deterministic text outputs for single and
        batched prompts.

        NOTE(review): as in the PyTorch variant above, the body reads
        `text_generator` and `UpperCamelCase__` which are never bound here —
        all assignments target `A_` — so this raises NameError as written.
        The locals appear to have lost their original names (pipeline object,
        call outputs, `do_sample=False`) in a mechanical rename; restore them
        before relying on this test.
        '''
        A_ = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""tf""" )
        # Using `do_sample=False` to force deterministic output
        A_ = text_generator("""This is a test""" , do_sample=UpperCamelCase__ )
        # Single-prompt call: expect one dict carrying the full generated text.
        self.assertEqual(
            UpperCamelCase__ , [
                {
                    """generated_text""": (
                        """This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
                        """ please,"""
                    )
                }
            ] , )
        # Batched prompts: expect one list of generations per input prompt.
        A_ = text_generator(["""This is a test""", """This is a second test"""] , do_sample=UpperCamelCase__ )
        self.assertEqual(
            UpperCamelCase__ , [
                [
                    {
                        """generated_text""": (
                            """This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
                            """ please,"""
                        )
                    }
                ],
                [
                    {
                        """generated_text""": (
                            """This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"""
                            """ Cannes 閲閲Cannes Cannes Cannes 攵 please,"""
                        )
                    }
                ],
            ] , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
A_ = TextGenerationPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
return text_generator, ["This is a test", "Another test"]
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = """Hello I believe in"""
A_ = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
A_ = text_generator(UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ , [{"""generated_text""": """Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"""}] , )
A_ = text_generator(UpperCamelCase__ , stop_sequence=""" fe""" )
self.assertEqual(UpperCamelCase__ , [{"""generated_text""": """Hello I believe in fe"""}] )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
A_ = text_generator.model
A_ = text_generator.tokenizer
A_ = text_generator("""This is a test""" )
self.assertEqual(UpperCamelCase__ , [{"""generated_text""": ANY(UpperCamelCase__ )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
A_ = text_generator("""This is a test""" , return_full_text=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , [{"""generated_text""": ANY(UpperCamelCase__ )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
A_ = pipeline(task="""text-generation""" , model=UpperCamelCase__ , tokenizer=UpperCamelCase__ , return_full_text=UpperCamelCase__ )
A_ = text_generator("""This is a test""" )
self.assertEqual(UpperCamelCase__ , [{"""generated_text""": ANY(UpperCamelCase__ )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
A_ = text_generator("""This is a test""" , return_full_text=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , [{"""generated_text""": ANY(UpperCamelCase__ )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
A_ = text_generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ , [
[{"""generated_text""": ANY(UpperCamelCase__ )}, {"""generated_text""": ANY(UpperCamelCase__ )}],
[{"""generated_text""": ANY(UpperCamelCase__ )}, {"""generated_text""": ANY(UpperCamelCase__ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
A_ = text_generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ , [
[{"""generated_text""": ANY(UpperCamelCase__ )}, {"""generated_text""": ANY(UpperCamelCase__ )}],
[{"""generated_text""": ANY(UpperCamelCase__ )}, {"""generated_text""": ANY(UpperCamelCase__ )}],
] , )
with self.assertRaises(UpperCamelCase__ ):
A_ = text_generator("""test""" , return_full_text=UpperCamelCase__ , return_text=UpperCamelCase__ )
with self.assertRaises(UpperCamelCase__ ):
A_ = text_generator("""test""" , return_full_text=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
with self.assertRaises(UpperCamelCase__ ):
A_ = text_generator("""test""" , return_text=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
A_ = text_generator("""""" )
self.assertEqual(UpperCamelCase__ , [{"""generated_text""": ANY(UpperCamelCase__ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
A_ = text_generator("""""" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
A_ = ["""RwkvForCausalLM""", """XGLMForCausalLM""", """GPTNeoXForCausalLM"""]
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("""This is a test""" * 500 , max_new_tokens=20 )
A_ = text_generator("""This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(UpperCamelCase__ ):
text_generator(
"""This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def snake_case_ ( self ) -> int:
'''simple docstring'''
import torch
# Classic `model_kwargs`
A_ = pipeline(
model="""hf-internal-testing/tiny-random-bloom""" , model_kwargs={"""device_map""": """auto""", """torch_dtype""": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
A_ = pipe("""This is a test""" )
self.assertEqual(
UpperCamelCase__ , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
A_ = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
A_ = pipe("""This is a test""" )
self.assertEqual(
UpperCamelCase__ , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
A_ = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
A_ = pipe("""This is a test""" )
self.assertEqual(
UpperCamelCase__ , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
@require_torch
@require_torch_gpu
def snake_case_ ( self ) -> int:
'''simple docstring'''
import torch
A_ = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device=0 , torch_dtype=torch.floataa )
pipe("""This is a test""" )
@require_torch
@require_accelerate
@require_torch_gpu
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
import torch
A_ = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.floataa )
pipe("""This is a test""" , do_sample=UpperCamelCase__ , top_p=0.5 )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = """Hello world"""
A_ = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
if text_generator.model.framework == "tf":
A_ = logging.get_logger("""transformers.generation.tf_utils""" )
else:
A_ = logging.get_logger("""transformers.generation.utils""" )
A_ = """Both `max_new_tokens`""" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(UpperCamelCase__ ) as cl:
A_ = text_generator(UpperCamelCase__ , max_length=10 , max_new_tokens=1 )
self.assertIn(UpperCamelCase__ , cl.out )
# The user only sets one -> no warning
with CaptureLogger(UpperCamelCase__ ) as cl:
A_ = text_generator(UpperCamelCase__ , max_new_tokens=1 )
self.assertNotIn(UpperCamelCase__ , cl.out )
with CaptureLogger(UpperCamelCase__ ) as cl:
A_ = text_generator(UpperCamelCase__ , max_length=10 )
self.assertNotIn(UpperCamelCase__ , cl.out )
| 288 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
__lowerCamelCase = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def tpu_command_parser(subparsers=None):
    """Build the argument parser for `accelerate tpu-config`.

    When `subparsers` is provided (the normal `accelerate` CLI path), register
    the command there; otherwise return a standalone parser. The mangled
    original passed the function object itself as `type=`/`default=` for
    `--config_file`; restored to `type=str, default=None`.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    """Run the configured setup commands on every worker of a TPU pod.

    Fills in unset CLI options from the saved accelerate config, resolves the
    requested accelerate version, joins all commands into a single `;`-separated
    shell string, and executes it through `gcloud compute tpus tpu-vm ssh`.

    Raises:
        ValueError: if neither a command nor a command file was supplied.
    """
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone

    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        # A concrete version string was given; pin the pip requirement to it.
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def main():
    """Standalone CLI entry point: parse arguments and launch the TPU setup."""
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
| 288 | 1 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    """Config tester that additionally checks MobileNetV2-specific fields."""

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    """Builds tiny MobileNetV2 configs/inputs and checks model output shapes.

    NOTE(review): the mangled original never assigned the `self.*` attributes
    its methods read; the constructor below restores them.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        # Without fine-grained output the head width is scaled by the depth multiplier.
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(
            result.pooler_output.shape,
            (self.batch_size, self.last_hidden_size),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for MobileNetV2.

    Here we also overwrite some of the tests of test_modeling_common.py, as
    MobileNetV2 does not use input_ids, inputs_embeds, attention_mask and
    seq_length.
    """

    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetVaModel,
            "image-classification": MobileNetVaForImageClassification,
            "image-segmentation": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    # NOTE(review): four boolean flags were mangled; names restored from the
    # common-test conventions — confirm against upstream if behavior differs.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    """Integration tests against the released MobileNetV2 checkpoints."""

    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
| 441 |
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """
    Count the ordered combinations (order matters, elements reusable) of
    values from `array` that sum to `target`, by naive recursion.

    `n` is unused here; it is kept so all three variants share one signature.

    >>> combination_sum_iv(3, [1, 2, 5], 5)
    9
    """

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1  # the empty combination
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """
    Same count as `combination_sum_iv`, memoised top-down with a DP array.

    >>> combination_sum_iv_dp_array(3, [1, 2, 5], 5)
    9
    """

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:  # already computed
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """
    Bottom-up DP: count ordered combinations of the first `n` elements of
    `array` that sum to `target`.

    >>> combination_sum_iv_bottom_up(3, [1, 2, 5], 5)
    9
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # one way to reach 0: the empty combination
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Example: ordered combinations of [1, 2, 5] summing to 5 -> 9
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
| 441 | 1 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    """Stub model whose forward takes the ONNX inputs contiguously; used to
    test `ensure_valid_input`, which inspects the forward signature."""

    def forward(self, input_ids, token_type_ids, attention_mask):
        return None
class FuncNonContiguousArgs:
    """Stub model with an extra parameter interleaved between the ONNX inputs
    (like GPT-2's `past`); used to test `ensure_valid_input`."""

    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class OnnxExportTestCase(unittest.TestCase):
    """End-to-end tests for the graph-to-ONNX conversion helpers."""

    # (model_name, model_kwargs) — the class attribute is read below as
    # `OnnxExportTestCase.MODEL_TO_TEST`, so it must carry that name.
    MODEL_TO_TEST = [
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

            with TemporaryDirectory() as bert_save_dir:
                model = BertModel(BertConfig(vocab_size=len(vocab)))
                model.save_pretrained(bert_save_dir)
                self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        """Export `model` to ONNX in a temp dir and return the output path."""
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()

                # Export
                convert(framework, model, path, opset, tokenizer, **model_kwargs)

                return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
| 231 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and the map of released checkpoints to their hosted configs.
# NOTE(review): both were bound to the same mangled name (`__A`), so the map
# silently overwrote the logger; distinct conventional names restore both.
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class TimesformerConfig(PretrainedConfig):
    """
    Configuration for the TimeSformer video transformer.

    Holds the patching/video hyper-parameters (image size, patch size, number
    of frames), the transformer sizes, and the space-time attention variant.
    Serialization and `**kwargs` handling are inherited from `PretrainedConfig`.
    """

    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias

        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
| 231 | 1 |
"""simple docstring"""
import string
def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher: print *message* decrypted under every key.

    NOTE(review): the original body's locals were all collapsed onto one
    mangled name (``num``/``translated``/``message`` were read but never
    bound, so every call raised NameError); reconstructed from the classic
    brute-force algorithm implied by those references.
    """
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol) - key
                if num < 0:
                    # Wrap around the alphabet.
                    num += len(string.ascii_uppercase)
                translated += string.ascii_uppercase[num]
            else:
                # Non-uppercase characters (spaces, punctuation) pass through.
                translated += symbol
        print(f"Decryption using Key #{key}: {translated}")


# Preserve the module's original (mangled) binding for this helper.
__snake_case = decrypt


def main() -> None:
    """Prompt for a message and print all candidate decryptions."""
    message = input('Encrypted message: ').upper()
    decrypt(message)


# The second mangled def shadowed the first; keep that final binding intact.
__snake_case = main


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 718 |
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
_snake_case = getLogger(__name__)
# NOTE(review): ``_snake_case`` was immediately rebound below, so the logger
# was discarded and the names the functions in this module actually use
# (``logger`` and the ``DEFAULT_DEVICE`` signature default) were left unbound.
# Bind them explicitly while keeping the original (mangled) binding's final
# value unchanged.
logger = _snake_case

_snake_case = "cuda" if torch.cuda.is_available() else "cpu"
DEFAULT_DEVICE = _snake_case
def generate_summaries_or_translations(
    examples,
    out_file,
    model_name,
    batch_size=8,
    device=DEFAULT_DEVICE,  # `DEFAULT_DEVICE` is expected to be bound by the module constants above.
    fpaa=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
):
    """Run seq2seq generation over *examples*, writing one hypothesis per line to *out_file*.

    NOTE(review): the original signature declared ``SCREAMING_SNAKE_CASE`` for
    every parameter (a duplicate-argument SyntaxError).  Names and defaults
    here are recovered from the body and from the keyword call in
    ``run_generate`` below (which passes ``fpaa=`` for the half-precision
    flag).

    Returns:
        dict with ``n_obs``, ``runtime`` (whole seconds) and
        ``seconds_per_sample``.
    """
    fout = Path(out_file).open('w', encoding='utf-8')
    model_name = str(model_name)
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name).to(device)
    if fpaa:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, 'prefix', '') or ''
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors='pt', truncation=True, padding='longest').to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + '\n')
        fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


# Preserve the module's original (mangled) binding; note the later defs
# re-bind ``__snake_case``, which is why the descriptive name matters.
__snake_case = generate_summaries_or_translations
def datetime_now():
    """Return the current local time as a ``YYYY-MM-DD HH:MM:SS`` string.

    NOTE(review): the original was bound only to ``__snake_case``, which the
    very next ``def`` re-binds, while the argparse setup below refers to this
    helper as ``datetime_now`` — hence the descriptive name.
    """
    return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')


# Preserve the module's original (mangled) binding.
__snake_case = datetime_now
def run_generate(verbose=True):
    """Parse CLI args, run generation, and optionally score against references.

    NOTE(review): reconstructed — the original bound every local to
    ``_lowerCAmelCase`` while later lines read the intended names
    (``parser``, ``args``, ``examples``, ``scores``), and it accessed
    ``args.fpaa`` although argparse defines ``--fp16`` (attribute ``fp16``).

    Args:
        verbose: print parsed generate kwargs and the final scores.

    Returns:
        the scores dict (empty when no ``--reference_path`` is given).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('model_name', type=str, help='like facebook/bart-large-cnn,t5-base, etc.')
    parser.add_argument('input_path', type=str, help='like cnn_dm/test.source')
    parser.add_argument('save_path', type=str, help='where to save summaries')
    parser.add_argument('--reference_path', type=str, required=False, help='like cnn_dm/test.target')
    parser.add_argument('--score_path', type=str, required=False, default='metrics.json', help='where to save metrics')
    parser.add_argument('--device', type=str, required=False, default=DEFAULT_DEVICE, help='cuda, cuda:1, cpu etc.')
    parser.add_argument(
        '--prefix', type=str, required=False, default=None, help='will be added to the begininng of src examples')
    parser.add_argument('--task', type=str, default='summarization', help='used for task_specific_params + metrics')
    parser.add_argument('--bs', type=int, default=8, required=False, help='batch size')
    parser.add_argument(
        '--n_obs', type=int, default=-1, required=False, help='How many observations. Defaults to all.')
    parser.add_argument('--fp16', action='store_true')
    parser.add_argument('--dump-args', action='store_true', help='print the custom hparams with the results')
    parser.add_argument(
        '--info',
        nargs='?',
        type=str,
        const=datetime_now(),
        help=(
            'use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'
            ' lang=en-ru. If no value is passed, the current datetime string will be used.'
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [' ' + x.rstrip() if 't5' in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError('Can\'t mix --fp16 and --device cpu')
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fpaa=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if 'translation' in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, 'w'))
    return scores


# Preserve the module's original (mangled) binding; the __main__ guard calls
# the entry point as ``run_generate``.
__snake_case = run_generate
if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    # NOTE(review): ensure ``run_generate`` is bound in this module — the entry
    # point above is defined under the mangled name ``__snake_case``.
    run_generate(verbose=True)
| 491 | 0 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __magic_name__ (snake_case_ ):
    '''simple docstring'''

    # NOTE(review): mechanical renaming has left this class non-executable as
    # written — ``__init__`` and the token parser declare the parameter ``_a``
    # twice and ``__call__`` uses ``*_a``/``**_a`` (duplicate-argument
    # SyntaxErrors), and most locals were collapsed onto ``snake_case__`` while
    # later lines still read the original names (``kwargs``, ``tokens``,
    # ``key``, ``output``, ``value``, ``content`` ...).  The code is kept
    # byte-identical; the comments describe the *apparent* intent (a
    # Donut-style processor pairing an image processor with a tokenizer) —
    # confirm against the upstream implementation before relying on it.
    __lowercase : List[str] = ['image_processor', 'tokenizer']
    __lowercase : str = 'AutoImageProcessor'
    __lowercase : Dict = 'AutoTokenizer'
    # Apparent intent: accept the deprecated ``feature_extractor`` kwarg as an
    # alias for ``image_processor`` and require both sub-processors.
    def __init__( self:int , _a:List[str]=None , _a:Optional[Any]=None , **_a:List[str] ):
        snake_case__ = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , _a , )
            snake_case__ = kwargs.pop('''feature_extractor''' )
        snake_case__ = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(_a , _a )
        snake_case__ = self.image_processor
        snake_case__ = False
    # Apparent intent: route ``images`` to the image processor and ``text`` to
    # the tokenizer, attaching ``labels`` (input_ids) when both are given.
    def __call__( self:Optional[int] , *_a:str , **_a:int ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*_a , **_a )
        snake_case__ = kwargs.pop('''images''' , _a )
        snake_case__ = kwargs.pop('''text''' , _a )
        if len(_a ) > 0:
            snake_case__ = args[0]
            snake_case__ = args[1:]
        if images is None and text is None:
            raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
        if images is not None:
            snake_case__ = self.image_processor(_a , *_a , **_a )
        if text is not None:
            snake_case__ = self.tokenizer(_a , **_a )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            snake_case__ = encodings['''input_ids''']
            return inputs
    # Thin delegations to the underlying tokenizer.
    def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , *_a:Union[str, Any] , **_a:Any ):
        return self.tokenizer.batch_decode(*_a , **_a )
    def SCREAMING_SNAKE_CASE__ ( self:Tuple , *_a:Union[str, Any] , **_a:Optional[int] ):
        return self.tokenizer.decode(*_a , **_a )
    # Apparent intent: deprecated ``as_target_processor`` context manager that
    # temporarily swaps the active sub-processor to the tokenizer.
    @contextmanager
    def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your images inputs, or in a separate call.''' )
        snake_case__ = True
        snake_case__ = self.tokenizer
        yield
        snake_case__ = self.image_processor
        snake_case__ = False
    # Apparent intent: Donut's ``token2json`` — parse ``<s_key>...</s_key>``
    # pseudo-XML token sequences into a nested JSON-like structure.
    def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:Dict , _a:Dict=False , _a:Optional[int]=None ):
        if added_vocab is None:
            snake_case__ = self.tokenizer.get_added_vocab()
        snake_case__ = {}
        while tokens:
            snake_case__ = re.search(r'''<s_(.*?)>''' , _a , re.IGNORECASE )
            if start_token is None:
                break
            snake_case__ = start_token.group(1 )
            snake_case__ = re.search(rF"""</s_{key}>""" , _a , re.IGNORECASE )
            snake_case__ = start_token.group()
            if end_token is None:
                snake_case__ = tokens.replace(_a , '''''' )
            else:
                snake_case__ = end_token.group()
                snake_case__ = re.escape(_a )
                snake_case__ = re.escape(_a )
                snake_case__ = re.search(F"""{start_token_escaped}(.*?){end_token_escaped}""" , _a , re.IGNORECASE )
                if content is not None:
                    snake_case__ = content.group(1 ).strip()
                    if r"<s_" in content and r"</s_" in content: # non-leaf node
                        snake_case__ = self.tokenajson(_a , is_inner_value=_a , added_vocab=_a )
                        if value:
                            if len(_a ) == 1:
                                snake_case__ = value[0]
                            snake_case__ = value
                    else: # leaf nodes
                        snake_case__ = []
                        for leaf in content.split(r'''<sep/>''' ):
                            snake_case__ = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                snake_case__ = leaf[1:-2] # for categorical special tokens
                            output[key].append(_a )
                        if len(output[key] ) == 1:
                            snake_case__ = output[key][0]
            snake_case__ = tokens[tokens.find(_a ) + len(_a ) :].strip()
            if tokens[:6] == r"<sep/>": # non-leaf nodes
                return [output] + self.tokenajson(tokens[6:] , is_inner_value=_a , added_vocab=_a )
        if len(_a ):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
    # Deprecated aliases kept for backward compatibility.
    @property
    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
        return self.image_processor_class
    @property
    def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
        return self.image_processor
| 33 |
from __future__ import annotations
from statistics import mean
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> list[int]:
snake_case__ = [0] * no_of_processes
snake_case__ = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(__lowerCAmelCase ):
snake_case__ = burst_time[i]
snake_case__ = []
snake_case__ = 0
snake_case__ = 0
# When processes are not completed,
# A process whose arrival time has passed \
# and has remaining execution time is put into the ready_process.
# The shortest process in the ready_process, target_process is executed.
while completed != no_of_processes:
snake_case__ = []
snake_case__ = -1
for i in range(__lowerCAmelCase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
snake_case__ = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
snake_case__ = i
total_time += burst_time[target_process]
completed += 1
snake_case__ = 0
snake_case__ = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> list[int]:
snake_case__ = [0] * no_of_processes
for i in range(__lowerCAmelCase ):
snake_case__ = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print("""[TEST CASE 01]""")
lowerCamelCase__ : Tuple = 4
lowerCamelCase__ : Union[str, Any] = [2, 5, 3, 7]
lowerCamelCase__ : Optional[Any] = [0, 0, 0, 0]
lowerCamelCase__ : Dict = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
lowerCamelCase__ : Union[str, Any] = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 33 | 1 |
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case = logging.get_logger(__name__)
# NOTE(review): every module constant below was renamed to ``__snake_case``,
# so each assignment clobbered the previous one and the names the tokenizer
# class actually references (``logger``, ``VOCAB_FILES_NAMES``,
# ``PRETRAINED_VOCAB_FILES_MAP``, ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES``)
# were left unbound.  Bind those names explicitly while keeping the mangled
# binding sequence — and its final value — unchanged.
logger = __snake_case

__snake_case = {'vocab_file': 'spiece.model'}
VOCAB_FILES_NAMES = __snake_case

__snake_case = {
    'vocab_file': {
        'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
    }
}
PRETRAINED_VOCAB_FILES_MAP = __snake_case

__snake_case = {
    'AI-Sweden/gpt-sw3-126m': 2048,
    'AI-Sweden/gpt-sw3-350m': 2048,
    'AI-Sweden/gpt-sw3-1.6b': 2048,
    'AI-Sweden/gpt-sw3-6.7b': 2048,
    'AI-Sweden/gpt-sw3-20b': 2048,
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = __snake_case
class _SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
    """simple docstring"""

    # NOTE(review): mechanical renaming has damaged this class — the four
    # class attributes below are all bound to ``_a`` (each assignment clobbers
    # the previous one), ``__init__`` declares ``lowerCamelCase__`` many times
    # over (a duplicate-argument SyntaxError), and most locals were collapsed
    # onto ``lowercase__`` while later lines still read the intended names
    # (``name_or_path``, ``eos_token``, ``tokens`` ...).  Kept byte-identical;
    # the apparent intent is a SentencePiece-backed GPT-SW3 tokenizer —
    # confirm every behavior described here against upstream.
    _a : Tuple = VOCAB_FILES_NAMES
    _a : Dict = PRETRAINED_VOCAB_FILES_MAP
    _a : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _a : List[str] = ['''input_ids''', '''attention_mask''']
    def __init__( self , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> None:
        lowercase__ : str = {} if sp_model_kwargs is None else sp_model_kwargs
        lowercase__ : Union[str, Any] = kwargs.get("""name_or_path""" )
        if name_or_path is None:
            logger.warning(
                """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
                """ you are testing the model, this can safely be ignored""" )
            lowercase__ : int = """None"""
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        lowercase__ : str = """<|endoftext|>""" if eos_token is None else eos_token
        lowercase__ : int = """<unk>""" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            lowercase__ : Tuple = unk_token if pad_token is None else pad_token
            lowercase__ : List[str] = eos_token if bos_token is None else bos_token
        else:
            lowercase__ : Union[str, Any] = """<pad>""" if pad_token is None else pad_token
            lowercase__ : List[str] = """<s>""" if bos_token is None else bos_token
        super().__init__(
            do_lower_case=lowerCamelCase__ , remove_space=lowerCamelCase__ , keep_accents=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase__ , )
        lowercase__ : Any = do_lower_case
        lowercase__ : Optional[int] = remove_space
        lowercase__ : Optional[int] = keep_accents
        lowercase__ : int = vocab_file
        lowercase__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(lowerCamelCase__ )
        # Used for whitespace normalization in input texts
        # fmt : off
        # NOTE(review): the set below presumably contained *distinct* Unicode
        # whitespace characters before corruption — verify the exact code
        # points against the upstream tokenizer source.
        lowercase__ : Dict = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
        # fmt : on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        lowercase__ : Tuple = re.compile(
            F'''[{''.join(map(lowerCamelCase__ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]''' )
    # Pickle support: the SentencePiece processor is not picklable, so it is
    # dropped on __getstate__ and re-loaded on __setstate__.
    def __getstate__( self ) -> Optional[int]:
        lowercase__ : Any = self.__dict__.copy()
        lowercase__ : Optional[Any] = None
        return state
    def __setstate__( self , lowerCamelCase__ ) -> str:
        lowercase__ : Any = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            lowercase__ : List[str] = {}
        lowercase__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def UpperCAmelCase__( self ) -> int:
        return len(self.sp_model )
    # Apparent intent: text preprocessing (strip non-printing chars, normalize
    # whitespace variants to a plain space, NFC-normalize).
    def UpperCAmelCase__( self , lowerCamelCase__ ) -> str:
        lowercase__ : Dict = self.non_printing_characters_re.sub("""""" , lowerCamelCase__ )
        # Normalize whitespaces
        lowercase__ : str = """""".join([char if char not in self.whitespaces else """ """ for char in text] )
        # NFC Unicode normalization
        lowercase__ : str = unicodedata.normalize("""NFC""" , lowerCamelCase__ )
        return text
    def UpperCAmelCase__( self , lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
        lowercase__ : str = self.preprocess_text(lowerCamelCase__ )
        return self.sp_model.encode(lowerCamelCase__ , out_type=lowerCamelCase__ )
    def UpperCAmelCase__( self , lowerCamelCase__ ) -> int:
        return self.sp_model.PieceToId(lowerCamelCase__ )
    def UpperCAmelCase__( self , lowerCamelCase__ ) -> str:
        return self.sp_model.IdToPiece(lowerCamelCase__ )
    @staticmethod
    def UpperCAmelCase__( lowerCamelCase__ ) -> str:
        return out_string
    # Apparent intent: convert_tokens_to_string — decode runs of ordinary
    # pieces via SentencePiece while passing special tokens through verbatim.
    def UpperCAmelCase__( self , lowerCamelCase__ ) -> str:
        lowercase__ : Tuple = []
        lowercase__ : Any = """"""
        lowercase__ : str = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(lowerCamelCase__ ) + token
                lowercase__ : Dict = True
                lowercase__ : Optional[int] = []
            else:
                current_sub_tokens.append(lowerCamelCase__ )
                lowercase__ : Dict = False
        out_string += self.sp_model.decode(lowerCamelCase__ )
        return out_string
    def UpperCAmelCase__( self ) -> Dict[str, int]:
        lowercase__ : Tuple = {self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    # Apparent intent: save_vocabulary — copy or serialize the SentencePiece
    # model file into the target directory.
    def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> Tuple[str]:
        if not os.path.isdir(lowerCamelCase__ ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        lowercase__ : Any = os.path.join(
            lowerCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , lowerCamelCase__ )
        elif not os.path.isfile(self.vocab_file ):
            with open(lowerCamelCase__ , """wb""" ) as fi:
                lowercase__ : Optional[int] = self.sp_model.serialized_model_proto()
                fi.write(lowerCamelCase__ )
        return (out_vocab_file,)
    # Apparent intent: fast-path encode for str or list[str], optionally
    # returning a torch tensor when return_tensors is True / "pt".
    def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
            lowercase__ : Optional[Any] = self.preprocess_text(lowerCamelCase__ )
            lowercase__ : Any = self.sp_model.encode(lowerCamelCase__ )
        else:
            lowercase__ : Union[str, Any] = [self.preprocess_text(lowerCamelCase__ ) for t in text]
            lowercase__ : Optional[Any] = self.sp_model.encode(lowerCamelCase__ )
        if return_tensors is True or return_tensors == "pt":
            lowercase__ : Optional[Any] = torch.tensor(lowerCamelCase__ )
        return token_ids
    def UpperCAmelCase__( self , lowerCamelCase__ ) -> str:
        return self.sp_model.decode(lowerCamelCase__ )
    # Apparent intent: build a chat prompt from a Conversation object and
    # encode it ("User: ..." / "Bot: ..." turns joined by BOS tokens).
    def UpperCAmelCase__( self , lowerCamelCase__ ) -> List[int]:
        lowercase__ : Optional[Any] = [F'''User: {text}''' if is_user else F'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
        lowercase__ : List[str] = (
            F'''{self.eos_token}{self.bos_token}''' + F'''{self.bos_token}'''.join(lowerCamelCase__ ) + F'''{self.bos_token}Bot:'''
        )
        return self.encode(text=lowerCamelCase__ )
"""simple docstring"""
from typing import Dict
from .base import GenericTensor, Pipeline
class _SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
    """simple docstring"""

    # NOTE(review): mechanical renaming has left this pipeline non-executable
    # as written — the methods declare ``lowerCamelCase__`` multiple times in
    # one signature (a duplicate-argument SyntaxError), all four helper
    # methods collide on the name ``UpperCAmelCase__``, and locals were
    # collapsed onto ``lowercase__`` while later lines still read the intended
    # names (``tokenize_kwargs``, ``truncation``, ``model_inputs`` ...).
    # Kept byte-identical; the apparent intent is a feature-extraction
    # pipeline (sanitize-params / preprocess / forward / postprocess) —
    # confirm against the upstream implementation.
    def UpperCAmelCase__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , **lowerCamelCase__ ) -> Dict:
        if tokenize_kwargs is None:
            lowercase__ : List[Any] = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    """truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)""" )
            lowercase__ : Dict = truncation
        lowercase__ : Any = tokenize_kwargs
        lowercase__ : List[str] = {}
        if return_tensors is not None:
            lowercase__ : str = return_tensors
        return preprocess_params, {}, postprocess_params
    # Apparent intent: tokenize the raw input for the model.
    def UpperCAmelCase__( self , lowerCamelCase__ , **lowerCamelCase__ ) -> Dict[str, GenericTensor]:
        lowercase__ : Union[str, Any] = self.framework
        lowercase__ : Optional[Any] = self.tokenizer(lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )
        return model_inputs
    # Apparent intent: run the forward pass.
    def UpperCAmelCase__( self , lowerCamelCase__ ) -> List[str]:
        lowercase__ : str = self.model(**lowerCamelCase__ )
        return model_outputs
    # Apparent intent: extract the first output tensor, converting to nested
    # lists unless raw tensors were requested.
    def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__=False ) -> Union[str, Any]:
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()
    def __call__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
        return super().__call__(*lowerCamelCase__ , **lowerCamelCase__ )
"""simple docstring"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()

_a = logging.get_logger(__name__)
# NOTE(review): ``_a`` was immediately rebound below, discarding the logger,
# and the names the conversion code actually uses (``logger`` and
# ``TOKENIZER_CLASSES``) were left unbound.  Bind them explicitly while
# keeping the original (mangled) binding's final value unchanged.
logger = _a

# Map "SlowTokenizerName" -> corresponding fast tokenizer class.
_a = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
TOKENIZER_CLASSES = _a
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Convert slow tokenizer checkpoints to the fast (tokenizers-backed) format.

    NOTE(review): the original signature declared ``__snake_case`` four times
    (a duplicate-argument SyntaxError); parameter names are recovered from the
    in-body references and the ``__main__`` call below.

    Args:
        tokenizer_name: a tokenizer class name to convert, or ``None`` for all
            classes in ``TOKENIZER_CLASSES``.
        checkpoint_name: a specific checkpoint, or ``None`` for every canonical
            checkpoint of each tokenizer class.
        dump_path: directory the fast tokenizer files are written to.
        force_download: re-download checkpoints even if cached.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + 'Fast')}
    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            # Convert every canonical checkpoint known to this tokenizer class.
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split('/')
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            # Keep only the fast tokenizer.json output.
            for file_name in file_names:
                if not file_name.endswith('tokenizer.json'):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")


# Preserve the module's original (mangled) binding; the __main__ guard calls
# this function by its descriptive name.
lowerCamelCase__ = convert_slow_checkpoint_to_fast
if __name__ == "__main__":
    # NOTE(review): the original bound the parser and the parsed namespace to
    # ``_a`` while the rest of the block read ``parser``/``args``; bind both
    # names, keeping the original assignments intact.
    _a = argparse.ArgumentParser()
    parser = _a
    # Required parameters
    parser.add_argument(
        """--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
    )
    parser.add_argument(
        """--tokenizer_name""",
        default=None,
        type=str,
        help=(
            F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
            """download and convert all the checkpoints from AWS."""
        ),
    )
    parser.add_argument(
        """--checkpoint_name""",
        default=None,
        type=str,
        help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
    )
    parser.add_argument(
        """--force_download""",
        action="""store_true""",
        help="""Re-download checkpoints.""",
    )
    _a = parser.parse_args()
    args = _a
    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 19 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase__ (lowerCAmelCase__ ):
    '''simple docstring'''

    # NOTE(review): mechanical renaming has left this pipeline non-executable
    # as written — ``__init__`` declares ``UpperCamelCase__`` nine times (a
    # duplicate-argument SyntaxError), three helper methods collide on the
    # name ``_lowercase``, and locals were collapsed onto ``lowerCamelCase``
    # while later lines still read the intended names (``scheduler``,
    # ``safety_checker``, ``outputs`` ...).  Kept byte-identical; the
    # apparent intent is a CLIPSeg-guided Stable Diffusion inpainting
    # pipeline — confirm against the upstream community pipeline.
    def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> int:
        super().__init__()
        # Apparent intent: patch up outdated scheduler configs
        # (steps_offset / skip_prk_steps) with deprecation warnings.
        if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
            lowerCamelCase : List[str] = (
                F'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
                F''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1" , "1.0.0" , UpperCamelCase__ , standard_warn=UpperCamelCase__ )
            lowerCamelCase : int = dict(scheduler.config )
            lowerCamelCase : int = 1
            lowerCamelCase : str = FrozenDict(UpperCamelCase__ )
        if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
            lowerCamelCase : Optional[int] = (
                F'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set" , "1.0.0" , UpperCamelCase__ , standard_warn=UpperCamelCase__ )
            lowerCamelCase : int = dict(scheduler.config )
            lowerCamelCase : Tuple = True
            lowerCamelCase : List[str] = FrozenDict(UpperCamelCase__ )
        if safety_checker is None:
            logger.warning(
                F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
        self.register_modules(
            segmentation_model=UpperCamelCase__ , segmentation_processor=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , )
    # Apparent intent: enable_attention_slicing(slice_size="auto").
    def _lowercase ( self , UpperCamelCase__ = "auto" ) -> Any:
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            lowerCamelCase : List[str] = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(UpperCamelCase__ )
    # Apparent intent: disable_attention_slicing.
    def _lowercase ( self ) -> Tuple:
        self.enable_attention_slicing(UpperCamelCase__ )
    # Apparent intent: enable_sequential_cpu_offload via accelerate.
    def _lowercase ( self ) -> List[str]:
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        lowerCamelCase : int = torch.device("cuda" )
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(UpperCamelCase__ , UpperCamelCase__ )
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _lowercase ( self ) -> List[Any]:
        if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(UpperCamelCase__ , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    # Apparent intent: segment the region named by ``text`` with CLIPSeg, turn
    # the logits into a mask image, then run Stable Diffusion inpainting on it.
    @torch.no_grad()
    def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 512 , UpperCamelCase__ = 512 , UpperCamelCase__ = 50 , UpperCamelCase__ = 7.5 , UpperCamelCase__ = None , UpperCamelCase__ = 1 , UpperCamelCase__ = 0.0 , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = "pil" , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = 1 , **UpperCamelCase__ , ) -> List[Any]:
        lowerCamelCase : str = self.segmentation_processor(
            text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
        lowerCamelCase : Union[str, Any] = self.segmentation_model(**UpperCamelCase__ )
        lowerCamelCase : Union[str, Any] = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        lowerCamelCase : Tuple = self.numpy_to_pil(UpperCamelCase__ )[0].resize(image.size )
        # Run inpainting pipeline with the generated mask
        lowerCamelCase : Any = StableDiffusionInpaintPipeline(
            vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
        return inpainting_pipeline(
            prompt=UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , height=UpperCamelCase__ , width=UpperCamelCase__ , num_inference_steps=UpperCamelCase__ , guidance_scale=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , num_images_per_prompt=UpperCamelCase__ , eta=UpperCamelCase__ , generator=UpperCamelCase__ , latents=UpperCamelCase__ , output_type=UpperCamelCase__ , return_dict=UpperCamelCase__ , callback=UpperCamelCase__ , callback_steps=UpperCamelCase__ , )
| 311 | 0 |
'''simple docstring'''
from cva import destroyAllWindows, imread, imshow, waitKey
def snake_case(img):
    """Convert an image (H x W x 3 array) to its negative, in place.

    Each channel value v becomes 255 - v. Returns the mutated array.
    """
    # Bug fix: the original unpacked both dimensions into a single name and
    # then iterated `range(snake_case)` — the function object itself.
    height, width = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(height):
        for j in range(width):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
if __name__ == "__main__":
    # read original image
    # Bug fix: the result was bound to a throwaway name while the next lines
    # read `img`, and the conversion called a nonexistent `convert_to_negative`.
    img = imread("image_data/lena.jpg", 1)
    # convert to its negative
    neg = snake_case(img)
    # show result image
    imshow("negative of original image", neg)
    waitKey(0)
    destroyAllWindows()
| 719 |
'''simple docstring'''
import torch
def snake_case() -> None:
    """Print how many CUDA GPUs are visible to this process.

    Bug fix: the annotation referenced an unimported `List` (NameError at
    definition time) and was wrong anyway — the function returns nothing.
    """
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f'Successfully ran on {num_gpus} GPUs')
if __name__ == "__main__":
    # Bug fix: the original called an undefined `main()`; the only entry
    # point defined in this file is `snake_case`.
    snake_case()
| 514 | 0 |
"""simple docstring"""
from math import pow
def backtrack(
    needed_sum,
    power,
    current_number,
    current_sum,
    solutions_count,
):
    """Count the ways to express `needed_sum` as a sum of distinct natural
    numbers each raised to `power`, exploring numbers >= current_number.

    Returns (current_sum, solutions_count). Bug fix: the original signature
    repeated one parameter name (a SyntaxError) and recursed into an
    undefined `backtrack`; real parameter names are restored, matching the
    argument order used at the recursive call sites.
    """
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count
def solve(needed_sum, power):
    """Return how many ways `needed_sum` can be written as a sum of distinct
    natural numbers raised to `power`.

    Raises ValueError unless 1 <= needed_sum <= 1000 and 2 <= power <= 10.
    Bug fix: the original reused the previous function's name (shadowing it)
    and repeated its parameter name; it delegates to `backtrack`.
    """
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            '''Invalid input\n'''
            '''needed_sum must be between 1 and 1000, power between 2 and 10.''' )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
    import doctest

    # Bug fix: stray table residue ("| 34 |") was fused onto this line,
    # making the module unparsable.
    doctest.testmod()
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class A__ :
    """Test helper that builds a tiny EsmConfig plus random inputs for the TF
    ESM model tests.

    NOTE(review): this class is machine-obfuscated — every local is assigned
    to the name `lowerCamelCase`, `__init__` reads an undefined `parent`, and
    several methods repeat the parameter name `__a` (a SyntaxError). The
    attributes the bodies read (self.batch_size, self.vocab_size, ...) are
    therefore never actually set; confirm against the original
    TFEsmModelTester before relying on any behavior documented here.
    """

    def __init__( self: Union[str, Any] , __a: int , )-> Dict:
        # NOTE(review): `parent` is undefined here — the parameter is `__a`.
        lowerCamelCase : Optional[Any] = parent
        lowerCamelCase : int = 13
        lowerCamelCase : Tuple = 7
        lowerCamelCase : Any = True
        lowerCamelCase : List[Any] = True
        lowerCamelCase : List[Any] = True
        lowerCamelCase : Optional[int] = 99
        lowerCamelCase : str = 32
        lowerCamelCase : Dict = 2
        lowerCamelCase : Optional[Any] = 4
        lowerCamelCase : List[str] = 37
        lowerCamelCase : Any = """gelu"""
        lowerCamelCase : List[Any] = 0.1
        lowerCamelCase : Optional[Any] = 0.1
        lowerCamelCase : List[Any] = 512
        lowerCamelCase : Optional[int] = 16
        lowerCamelCase : str = 2
        lowerCamelCase : Optional[int] = 0.02
        lowerCamelCase : Dict = 3
        lowerCamelCase : List[Any] = 4
        lowerCamelCase : Union[str, Any] = None

    # Builds (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels).
    def a__ ( self: Optional[int] )-> str:
        lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase : int = None
        if self.use_input_mask:
            lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase : str = None
        lowerCamelCase : Dict = None
        lowerCamelCase : Optional[Any] = None
        if self.use_labels:
            lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
        lowerCamelCase : Union[str, Any] = EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
        # NOTE(review): the names returned below are never assigned in this body.
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    # Decoder variant: adds encoder_hidden_states / encoder_attention_mask.
    def a__ ( self: Any )-> List[Any]:
        (
            (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) ,
        ) : Optional[Any] = self.prepare_config_and_inputs()
        lowerCamelCase : Dict = True
        lowerCamelCase : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    # Checks the base model's output shape via dict, list, and tensor call styles.
    def a__ ( self: List[Any] , __a: str , __a: int , __a: Any , __a: Dict , __a: int , __a: str )-> Union[str, Any]:
        lowerCamelCase : List[str] = TFEsmModel(config=__a )
        lowerCamelCase : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        lowerCamelCase : Union[str, Any] = model(__a )
        lowerCamelCase : Union[str, Any] = [input_ids, input_mask]
        lowerCamelCase : Tuple = model(__a )
        lowerCamelCase : Dict = model(__a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    # Checks the model as a decoder, with and without encoder outputs.
    def a__ ( self: List[str] , __a: Any , __a: List[Any] , __a: Tuple , __a: Tuple , __a: int , __a: Optional[int] , __a: str , __a: Optional[int] , )-> Dict:
        lowerCamelCase : str = True
        lowerCamelCase : List[str] = TFEsmModel(config=__a )
        lowerCamelCase : Dict = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """encoder_hidden_states""": encoder_hidden_states,
            """encoder_attention_mask""": encoder_attention_mask,
        }
        lowerCamelCase : List[Any] = model(__a )
        lowerCamelCase : int = [input_ids, input_mask]
        lowerCamelCase : int = model(__a , encoder_hidden_states=__a )
        # Also check the case where encoder outputs are not passed
        lowerCamelCase : List[Any] = model(__a , attention_mask=__a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    # Checks the masked-LM head's logits shape.
    def a__ ( self: Optional[int] , __a: Optional[Any] , __a: Tuple , __a: List[str] , __a: Tuple , __a: Optional[Any] , __a: str )-> Any:
        lowerCamelCase : Optional[int] = TFEsmForMaskedLM(config=__a )
        lowerCamelCase : Dict = model([input_ids, input_mask] )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    # Checks the token-classification head's logits shape.
    def a__ ( self: Dict , __a: Optional[Any] , __a: Any , __a: int , __a: Optional[int] , __a: str , __a: List[Any] )-> Any:
        lowerCamelCase : int = self.num_labels
        lowerCamelCase : Dict = TFEsmForTokenClassification(config=__a )
        lowerCamelCase : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        lowerCamelCase : Dict = model(__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    # Packs the prepared inputs into the common (config, inputs_dict) pair.
    def a__ ( self: Dict )-> str:
        lowerCamelCase : str = self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) ,
        ) : Dict = config_and_inputs
        lowerCamelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class A__ ( __lowercase , __lowercase , unittest.TestCase):
    """Common TF model-tester suite for the ESM family.

    NOTE(review): obfuscated test class — the mixin bases are named
    `__lowercase` (undefined in this chunk), the class attributes all share
    the name `snake_case__`, and `a__` at setUp references an undefined
    `__a` and a `TFEsmModelTester` that is not defined under that name here.
    Compare with the original transformers test file before trusting it.
    """

    # Model classes exercised by the common tests (guarded on TF availability).
    snake_case__ : Optional[Any] =(
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline task -> model class mapping for the pipeline tests.
    snake_case__ : int =(
        {
            '''feature-extraction''': TFEsmModel,
            '''fill-mask''': TFEsmForMaskedLM,
            '''text-classification''': TFEsmForSequenceClassification,
            '''token-classification''': TFEsmForTokenClassification,
            '''zero-shot''': TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    snake_case__ : Optional[Any] =False
    snake_case__ : Any =False

    def a__ ( self: Any )-> Optional[int]:
        # NOTE(review): `__a` is undefined in this method's scope.
        lowerCamelCase : Optional[Any] = TFEsmModelTester(self )
        lowerCamelCase : Tuple = ConfigTester(self , config_class=__a , hidden_size=37 )

    def a__ ( self: Optional[int] )-> Tuple:
        self.config_tester.run_common_tests()

    def a__ ( self: str )-> Any:
        lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__a )

    def a__ ( self: Any )-> Dict:
        lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*__a )

    def a__ ( self: Tuple )-> Tuple:
        lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*__a )

    def a__ ( self: int )-> Optional[Any]:
        lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__a )

    @slow
    def a__ ( self: Union[str, Any] )-> str:
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase : Tuple = TFEsmModel.from_pretrained(__a )
            self.assertIsNotNone(__a )

    @unittest.skip("""Protein models do not support embedding resizing.""" )
    def a__ ( self: List[Any] )-> Optional[Any]:
        pass

    @unittest.skip("""Protein models do not support embedding resizing.""" )
    def a__ ( self: Tuple )-> str:
        pass

    # Verifies input embeddings are Keras layers and the MLM bias structure.
    def a__ ( self: List[Any] )-> Tuple:
        lowerCamelCase , lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase : int = model_class(__a )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                lowerCamelCase : Dict = model.get_bias()
                assert isinstance(__a , __a )
                for k, v in name.items():
                    assert isinstance(__a , tf.Variable )
            else:
                lowerCamelCase : Union[str, Any] = model.get_output_embeddings()
                assert x is None
                lowerCamelCase : str = model.get_bias()
                assert name is None
@require_tf
class A__ ( unittest.TestCase):
    """Slow integration tests comparing TF ESM outputs against recorded
    reference slices from facebook/esm2_t6_8M_UR50D.

    NOTE(review): obfuscated — locals are all bound to `lowerCamelCase`, so
    `model`/`output`/`expected_slice` read below are never assigned.
    """

    @slow
    def a__ ( self: Any )-> Tuple:
        lowerCamelCase : List[Any] = TFEsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
        lowerCamelCase : List[str] = tf.constant([[0, 1, 2, 3, 4, 5]] )
        lowerCamelCase : Union[str, Any] = model(__a )[0]
        lowerCamelCase : Union[str, Any] = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape ) , __a )
        # compare the actual values for a slice.
        lowerCamelCase : Any = tf.constant(
            [
                [
                    [8.92_15_18, -10.58_98_14, -6.4_67_13_07],
                    [-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15],
                    [-7.78_12_47, -13.95_15_57, -3.74_05_92],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )

    @slow
    def a__ ( self: List[Any] )-> Optional[int]:
        lowerCamelCase : int = TFEsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
        lowerCamelCase : Any = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        lowerCamelCase : List[str] = model(__a )[0]
        # compare the actual values for a slice.
        lowerCamelCase : str = tf.constant(
            [
                [
                    [0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39],
                    [0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22],
                    [0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 222 | 0 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def SCREAMING_SNAKE_CASE__ ( __a ):
snake_case_ : Optional[Any] = []
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
for v in tree.values():
shapes.extend(_fetch_dims(__lowerCAmelCase ) )
elif isinstance(__lowerCAmelCase , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(__lowerCAmelCase ) )
elif isinstance(__lowerCAmelCase , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError('Not supported' )
return shapes
@torch.jit.ignore
def SCREAMING_SNAKE_CASE__ ( __a , __a ):
snake_case_ : Tuple = []
for d in reversed(__lowerCAmelCase ):
idx.append(flat_idx % d )
snake_case_ : Union[str, Any] = flat_idx // d
return tuple(reversed(__lowerCAmelCase ) )
@torch.jit.ignore
def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a = None , __a = None , ):
def reduce_edge_list(__a ) -> None:
snake_case_ : int = True
for i in range(len(__lowerCAmelCase ) ):
snake_case_ : Dict = -1 * (i + 1)
l[reversed_idx] &= tally
snake_case_ : Optional[Any] = l[reversed_idx]
if start_edges is None:
snake_case_ : Dict = [s == 0 for s in start]
reduce_edge_list(__lowerCAmelCase )
if end_edges is None:
snake_case_ : Any = [e == (d - 1) for e, d in zip(__lowerCAmelCase , __lowerCAmelCase )]
reduce_edge_list(__lowerCAmelCase )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(__lowerCAmelCase ) == 0:
return [()]
elif len(__lowerCAmelCase ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
snake_case_ : List[Any] = []
snake_case_ : int = []
# Dimensions common to start and end can be selected directly
for s, e in zip(__lowerCAmelCase , __lowerCAmelCase ):
if s == e:
path_list.append(slice(__lowerCAmelCase , s + 1 ) )
else:
break
snake_case_ : Union[str, Any] = tuple(__lowerCAmelCase )
snake_case_ : Dict = len(__lowerCAmelCase )
# start == end, and we're done
if divergence_idx == len(__lowerCAmelCase ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
snake_case_ : Tuple = start[divergence_idx]
return tuple(
path + (slice(__lowerCAmelCase , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
snake_case_ : str = end[divergence_idx]
return tuple(
path + (slice(__lowerCAmelCase , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
snake_case_ : Optional[Any] = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _chunk_slice(t, flat_start, flat_end, no_batch_dims):
    """Extract rows [flat_start, flat_end) of `t`'s flattened leading
    `no_batch_dims` batch dimensions, without flattening the whole tensor.

    Bug fix: the original repeated its parameter name and referenced
    undefined locals; renamed `_chunk_slice` to match the call in
    `chunk_layer`'s low-memory path.
    """
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)

    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(layer, inputs, chunk_size, no_batch_dims, low_mem=False, _out=None, _add_into_out=False):
    """Apply `layer` over `inputs` in chunks of `chunk_size` along the
    flattened leading `no_batch_dims` batch dimensions, to bound peak memory.

    `inputs` is a (possibly nested) dict of tensors passed to `layer` as
    keyword arguments; outputs may be a tensor, tuple, or nested dict.
    When `low_mem` is True, inputs are not pre-flattened and chunks are cut
    with `_chunk_slice`. `_out`/`_add_into_out` allow writing (or
    accumulating) into a pre-allocated output tree. Bug fix: the original's
    parameters were never used and the body was full of undefined names;
    real names are restored so values flow as the structure dictates.
    """
    if not (len(inputs) > 0):
        raise ValueError('Must provide at least one input')

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    # Broadcast every input to the largest observed batch shape.
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t):
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    # Ceiling division: one extra chunk for any remainder.
    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t):
        # Size-1 leading dims broadcast, so they are passed through whole.
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1, d2) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError('Not supported')

        i += chunk_size

    # Restore the original batch dimensions on the way out.
    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
    return out
class SCREAMING_SNAKE_CASE_:
    """Binary-search tuner for the largest chunk size (power of two up to
    `max_chunk_size`) that runs without a RuntimeError (e.g. CUDA OOM).

    The chosen size is cached and reused while the argument shapes stay
    consistent. Bug fix: the original's methods all shared one name (the
    last silently shadowing the rest), repeated their parameter names (a
    SyntaxError), and read undefined locals; method names are restored to
    the ones this class's own body calls (`_compare_arg_caches`,
    `_determine_favorable_chunk_size`).
    """

    def __init__(self, max_chunk_size=512):
        # Upper bound for the candidate chunk sizes.
        self.max_chunk_size = max_chunk_size
        # Cached result of the last tuning run, and the arg "fingerprint"
        # (tensor shapes + scalar values) it was computed for.
        self.cached_chunk_size = None
        self.cached_arg_data = None

    def _determine_favorable_chunk_size(self, fn, args, min_chunk_size):
        """Binary-search the largest candidate chunk size for which
        `fn(*args, chunk_size=...)` succeeds."""
        logging.info('Tuning chunk size...')

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        # Pad the largest candidate slightly so max_chunk_size itself is tried.
        candidates[-1] += 4

        def test_chunk_size(chunk_size) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                # Typically an out-of-memory failure — the size is too big.
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1, ac2):
        """Return True when two argument fingerprints are structurally and
        element-wise equal (dicts compared by sorted key order)."""
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn, args, min_chunk_size):
        """Return a chunk size for `representative_fn`, re-tuning only when
        the argument fingerprint has changed since the last call."""
        consistent = True
        # Replace tensors by their shapes so the fingerprint is cheap to keep.
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Nothing cached yet — force a tuning run.
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
| 712 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
# NOTE(review): both constants below are bound to the same obfuscated name,
# so the logger is immediately shadowed by the URL map — confirm the intended
# names (likely `logger` and a pretrained-config archive map).
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)

# Map from checkpoint identifier to its hosted config.json URL.
_SCREAMING_SNAKE_CASE = {
    """SCUT-DLVCLab/lilt-roberta-en-base""": (
        """https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"""
    ),
}
class SCREAMING_SNAKE_CASE_(PretrainedConfig):
    """Configuration class for LiLT models.

    Stores the hyperparameters of a LiLT encoder (text branch plus the 2-D
    layout branch controlled by `channel_shrink_ratio` and
    `max_2d_position_embeddings`). Bug fixes: the base class referenced an
    undefined name instead of the imported `PretrainedConfig`, the registry
    attribute was not called `model_type`, and the constructor repeated one
    parameter name (a SyntaxError) so none of the assignments below could
    see their values.
    """

    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        # Layout-branch hidden size = hidden_size // channel_shrink_ratio.
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 534 | 0 |
class Graph:
    """Directed graph stored as an adjacency dict {vertex: [neighbors]},
    with a recursive depth-first traversal that prints visited vertices.

    Bug fixes: all four methods shared one obfuscated name (so only the last
    survived), and the class is named `Graph` — the name this file's own
    driver script instantiates.
    """

    def __init__(self):
        # vertex -> list of adjacent vertices (insertion order preserved)
        self.vertex = {}

    def print_graph(self) -> None:
        """Print the adjacency dict, then each vertex with its edge list."""
        print(self.vertex)
        for i in self.vertex:
            print(i, ' -> ', ' -> '.join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex, to_vertex) -> None:
        """Add a directed edge from_vertex -> to_vertex."""
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        """Run DFS from every unvisited vertex (assumes vertices 0..n-1)."""
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited) -> None:
        """Visit start_vertex, then recurse into any unvisited vertex."""
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=' ')
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    # Bug fix: the graph was bound to a throwaway name while every call
    # below used `g`.
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print('DFS:')
    g.dfs()

    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
| 605 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE(n):
    """Return the prime factorization of n as a non-decreasing list.

    E.g. 12 -> [2, 2, 3]; 1 -> []. Bug fix: the original's parameter was
    never used and the body read undefined locals (`n`, `i`, `factors`).
    """
    i = 2
    factors = []
    # Trial division: only divisors up to sqrt(n) need checking.
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    # Whatever remains (> 1) is itself prime.
    if n > 1:
        factors.append(n)
    return factors
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 605 | 1 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
# Module-level logger.
# NOTE(review): every constant below is bound to the same obfuscated name
# `lowercase`, so each assignment shadows the previous one — confirm the
# intended names (logger, VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES).
lowercase : Optional[Any] = logging.get_logger(__name__)

# Expected tokenizer file names.
lowercase : str = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

# Hosted file URLs for the pretrained checkpoint.
lowercase : Optional[Any] = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

# Maximum input length per checkpoint.
lowercase : List[Any] = {
    "facebook/blenderbot_small-90M": 512,
}
class __UpperCAmelCase(PreTrainedTokenizerFast):
    """Fast BlenderbotSmall tokenizer backed by a byte-level BPE.

    Bug fixes: the base class referenced an undefined name instead of the
    imported `PreTrainedTokenizerFast`; the four class attributes all shared
    one name (each shadowing the last); `__init__` and both methods repeated
    their parameter names (a SyntaxError) while the bodies read the intended
    names (`token_ids_0`, ...). Names are restored per the tokenizer-fast
    contract.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap one or two sequences with BOS/EOS special tokens."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Return all-zero token type ids (BlenderbotSmall does not use them)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 542 |
'''simple docstring'''
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result, args):
    """Compute WER/CER over `result` (columns `target`/`prediction`), print
    them, and write them to `<dataset_id>_eval_results.txt`; optionally dump
    per-example predictions and targets to log files.

    Bug fix: the original repeated its parameter name (a SyntaxError) and
    read undefined `args`/`result`; it is named `log_results`, which `main`
    calls.
    """
    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/') + [args.config, args.split])

    # load metric
    wer = load_metric('wer')
    cer = load_metric('cer')

    # compute metrics
    wer_result = wer.compute(references=result['target'], predictions=result['prediction'])
    cer_result = cer.compute(references=result['target'], predictions=result['prediction'])

    # print & log results
    result_str = f'WER: {wer_result}\nCER: {cer_result}'
    print(result_str)

    with open(f'{dataset_id}_eval_results.txt', 'w') as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f'log_{dataset_id}_predictions.txt'
        target_file = f'log_{dataset_id}_targets.txt'

        with open(pred_file, 'w') as p, open(target_file, 'w') as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f'{i}' + '\n')
                p.write(batch['prediction'] + '\n')
                t.write(f'{i}' + '\n')
                t.write(batch['target'] + '\n')

            result.map(write_to_file, with_indices=True)
def normalize_text(text):
    """Lowercase `text`, strip punctuation the model was trained without,
    and collapse the listed whitespace token sequences to single spaces.

    Bug fix: the original's parameter was unused and the body read an
    undefined `text`; it is named `normalize_text`, which `main` calls.
    """
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, '', text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', ' ', ' ']
    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t))

    return text
def main(args):
    """Run ASR inference over the requested dataset split and log WER/CER.

    Reconstructed: the original parameter was unused while the body read a
    global ``args``, and the per-batch dict assignments were lost.
    """
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column('audio', Audio(sampling_rate=sampling_rate))
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition', model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch['audio']['array'], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)
        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)
    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    # Command-line entry point: original assigned the parser and the parsed
    # args to a variable named `lowercase`, leaving `parser`/`args` undefined.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()
    main(args)
| 542 | 1 |
def validate_initial_digits(credit_card_number: str) -> bool:
    """Return True if the number starts with a known issuer prefix
    (Amex 34/37, JCB 35, Visa 4, MasterCard 5, Discover 6).

    Named per its call site in ``validate_credit_card_number``.
    """
    return credit_card_number.startswith(("""34""", """35""", """37""", """4""", """5""", """6"""))
def luhn_validation(credit_card_number: str) -> bool:
    """Return True if *credit_card_number* passes the Luhn checksum.

    The original body referenced ``credit_card_number`` while its parameter
    was named differently, and the def name shadowed its siblings.
    """
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
def validate_credit_card_number(credit_card_number: str) -> bool:
    """Validate a credit card number: digits only, length 13-16, known
    issuer prefix, and a passing Luhn checksum. Prints the verdict and
    returns it as a bool.
    """
    error_message = F"""{credit_card_number} is an invalid credit card number because"""
    if not credit_card_number.isdigit():
        print(F"""{error_message} it has nonnumerical characters.""")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(F"""{error_message} of its length.""")
        return False
    if not validate_initial_digits(credit_card_number):
        print(F"""{error_message} of its first two digits.""")
        return False
    if not luhn_validation(credit_card_number):
        print(F"""{error_message} it fails the Luhn check.""")
        return False
    print(F"""{credit_card_number} is a valid credit card number.""")
    return True
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Demo: a Luhn-valid Visa number, then a too-short invalid number.
    validate_credit_card_number('''4111111111111111''')
    validate_credit_card_number('''32323''')
| 593 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase ( PipelineTesterMixin , unittest.TestCase):
    """Fast CPU tests for ConsistencyModelPipeline using tiny test checkpoints.

    Reconstructed: the base class was an undefined name (should be the
    imported PipelineTesterMixin), the mixin attributes and all method names
    were mangled to duplicates, and ``get_dummy_inputs`` had duplicate
    parameter names (a SyntaxError). Names restored from the internal call
    sites (``self.get_dummy_components()`` etc.).
    """

    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ])

    @property
    def dummy_uncond_unet(self):
        """Tiny unconditional UNet for the fast tests."""
        unet = UNetaDModel.from_pretrained(
            """diffusers/consistency-models-test""" , subfolder="""test_unet""" , )
        return unet

    @property
    def dummy_cond_unet(self):
        """Tiny class-conditional UNet for the fast tests."""
        unet = UNetaDModel.from_pretrained(
            """diffusers/consistency-models-test""" , subfolder="""test_unet_class_cond""" , )
        return unet

    def get_dummy_components(self, class_cond=False):
        """Components dict (unet + scheduler) to build the pipeline."""
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet
        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs for the pipeline on *device*."""
        if str(device).startswith("""mps"""):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            """batch_size""": 1,
            """num_inference_steps""": None,
            """timesteps""": [22, 0],
            """generator""": generator,
            """output_type""": """np""",
        }
        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3_572, 0.6_273, 0.4_031, 0.3_961, 0.4_321, 0.5_730, 0.5_266, 0.4_780, 0.5_004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["""class_labels"""] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3_572, 0.6_273, 0.4_031, 0.3_961, 0.4_321, 0.5_730, 0.5_266, 0.4_780, 0.5_004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3

    def test_consistency_model_pipeline_onestep(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["""num_inference_steps"""] = 1
        inputs["""timesteps"""] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5_004, 0.5_004, 0.4_994, 0.5_008, 0.4_976, 0.5_018, 0.4_990, 0.4_982, 0.4_987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["""num_inference_steps"""] = 1
        inputs["""timesteps"""] = None
        inputs["""class_labels"""] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5_004, 0.5_004, 0.4_994, 0.5_008, 0.4_976, 0.5_018, 0.4_990, 0.4_982, 0.4_987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase):
    """Slow GPU integration tests for ConsistencyModelPipeline using the
    full imagenet64 consistency-distillation checkpoint.

    Reconstructed: all method names were mangled to the same identifier and
    ``get_inputs``/``get_fixed_latents`` had duplicate parameter names
    (SyntaxErrors). ``torch.floataa`` is not a real torch attribute; per the
    upstream test, the default dtype is float32 and the flash-attention
    tests run in float16.
    """

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="""cpu""", dtype=torch.float32, shape=(1, 3, 64, 64)):
        """Deterministic call kwargs; optionally pre-computes fixed latents."""
        generator = torch.manual_seed(seed)
        inputs = {
            """num_inference_steps""": None,
            """timesteps""": [22, 0],
            """class_labels""": 0,
            """generator""": generator,
            """output_type""": """np""",
        }
        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["""latents"""] = latents
        return inputs

    def get_fixed_latents(self, seed=0, device="""cpu""", dtype=torch.float32, shape=(1, 3, 64, 64)):
        """Seeded random latents on *device* so fp16 runs are reproducible."""
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNetaDModel.from_pretrained("""diffusers/consistency_models""", subfolder="""diffusers_cd_imagenet64_l2""")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0_888, 0.0_881, 0.0_666, 0.0_479, 0.0_292, 0.0_195, 0.0_201, 0.0_163, 0.0_254])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2

    def test_consistency_model_cd_onestep(self):
        unet = UNetaDModel.from_pretrained("""diffusers/consistency_models""", subfolder="""diffusers_cd_imagenet64_l2""")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs()
        inputs["""num_inference_steps"""] = 1
        inputs["""timesteps"""] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0_340, 0.0_152, 0.0_063, 0.0_267, 0.0_221, 0.0_107, 0.0_416, 0.0_186, 0.0_217])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2

    @require_torch_a
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNetaDModel.from_pretrained("""diffusers/consistency_models""", subfolder="""diffusers_cd_imagenet64_l2""")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1_875, 0.1_428, 0.1_289, 0.2_151, 0.2_092, 0.1_477, 0.1_877, 0.1_641, 0.1_353])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3

    @require_torch_a
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNetaDModel.from_pretrained("""diffusers/consistency_models""", subfolder="""diffusers_cd_imagenet64_l2""")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["""num_inference_steps"""] = 1
        inputs["""timesteps"""] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1_663, 0.1_948, 0.2_275, 0.1_680, 0.1_204, 0.1_245, 0.1_858, 0.1_338, 0.2_095])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
| 593 | 1 |
def solution(n: int = 1_000) -> int:
    """Project Euler 25: return the index of the first Fibonacci number
    with *n* digits.

    The original iterated ``str(n)`` instead of the current Fibonacci value
    and never reached the requested digit count; it is also called as
    ``solution`` in the ``__main__`` guard.
    """
    fa, fb = 1, 1
    index = 2
    while True:
        f = fa + fb
        fa, fb = fb, f
        index += 1
        if len(str(f)) == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 264 | import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys from *state_dict* in place.

    Uses ``pop(k, None)`` so absent keys are ignored; the original popped
    with mangled arguments. Named per its call site in the converter.
    """
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build a bias-free Linear layer that shares *emb*'s weight matrix
    (weight tying for the LM head).
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq M2M-100 checkpoint and convert it into a
    ``MaMaaaForConditionalGeneration`` model.

    Reconstructed: the ``shared.weight`` and ``lm_head`` assignments were
    lost and ``load_state_dict`` received mangled arguments
    (``strict=False`` per the fairseq→HF layout mismatch).
    """
    mam_aaa = torch.load(checkpoint_path, map_location='''cpu''')
    args = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
    state_dict = mam_aaa['''model''']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size, max_position_embeddings=1_024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''relu''', )
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    # CLI entry point. The original assigned everything to `UpperCAmelCase_`
    # and had a stray `ß` appended to `args.fairseq_path`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
    parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester ( unittest.TestCase ):
    """Builds tiny RobertaPreLayerNorm configs and inputs for the flax tests.

    Renamed from an obfuscated identifier: the test class below instantiates
    ``FlaxRobertaPreLayerNormModelTester(self)``. The original ``__init__``
    declared every parameter with the same name (a SyntaxError); parameter
    names restored from the body's attribute assignments.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Random ids/masks plus a tiny non-decoder config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Same as above but packed into the common (config, inputs) shape."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """Decoder variant: sets is_decoder and adds encoder states/mask."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class lowercase ( FlaxModelTesterMixin , unittest.TestCase ):
    """Common flax model tests for RobertaPreLayerNorm.

    Reconstructed: the base class was an undefined name (should be the
    imported FlaxModelTesterMixin), both class attributes shared one mangled
    name, and ``setUp`` was not named ``setUp`` so unittest never ran it.
    """

    # NOTE(review): attribute name taken from the upstream transformers test — confirm.
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class lowercase ( unittest.TestCase ):
    """Slow integration tests against the converted checkpoint.

    Reconstructed: intermediate assignments were mangled; ``jnp.intaa`` /
    ``np.floataa`` are not real dtypes (restored to int32 / float32 per the
    upstream test).
    """

    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=True)
        input_ids = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 5_0265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.48_80, 18.01_99, -5.23_67], [-1.88_77, -4.08_85, 10.70_85], [-2.26_13, -5.61_10, 7.26_65]]], dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=True)
        input_ids = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.02_08, -0.03_56, 0.02_37], [-0.15_69, -0.04_11, -0.26_26], [0.18_79, 0.01_25, -0.00_89]]], dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 362 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase ( ProcessorMixin ):
    """Processor that wraps an auto image processor and an auto tokenizer.

    Reconstructed: the base class was an undefined name (should be the
    imported ProcessorMixin), the three mixin class attributes all shared
    one mangled name, and the ``encoding``/``image_features`` assignments in
    ``__call__`` were lost.
    """

    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''AutoImageProcessor'''
    tokenizer_class = '''AutoTokenizer'''

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        # Default processor used when only one modality is supplied.
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize *text* and/or preprocess *images*; at least one must be given."""
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            # Merge pixel values into the text encoding for joint inputs.
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 362 | 1 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
# Constants for Project Euler 59. All four were assigned to the same
# mangled name while the functions below reference them by these names.
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
# Key characters are lowercase letters; candidate keys are drawn from these.
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
# Frequent English words used to filter candidate decryptions.
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(key: tuple[int, ...], ciphertext: list[int]) -> str | None:
    """XOR-decrypt *ciphertext* with the cycled *key*; return the decoded
    string, or None as soon as a byte falls outside the printable set.

    Named per its call site in ``filter_valid_chars``; the placeholder
    ``= 42`` lines were bare type annotations in the original.
    """
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Try every 3-letter lowercase key and keep the decryptions that are
    entirely printable."""
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(key, ciphertext)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """Keep only the candidates containing *common_word* (case-insensitive)."""
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    """Project Euler 59: brute-force the 3-letter XOR key, narrow the
    candidates with common English words, and return the sum of the ASCII
    values of the decrypted text.

    Reconstructed: the file path arguments were mangled (``__file__`` and
    *filename*), and the ``= 42`` lines were bare annotations.
    """
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(f"{solution() = }")
| 720 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    """Build the argument parser for ``accelerate test``.

    If *subparsers* is given, attach a ``test`` sub-command to it; otherwise
    create a standalone parser. The original's parameter was mangled while
    the body read ``subparsers``, and it is called as ``test_command_parser``
    in ``main``.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")
    parser.add_argument(
        "--config_file", default=None, help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ), )
    if subparsers is not None:
        # Route the sub-command to its handler.
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    """Launch the bundled test script via ``accelerate-launch`` and report
    success. Named per its call site in ``main``.
    """
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = F"""--config_file={args.config_file} {script_name}"""
    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")
def main():
    """Standalone entry point: parse args and run the test command."""
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
| 150 | 0 |
from __future__ import annotations
import math
def default_matrix_multiplication(a, b):
    """Multiply two 2x2 matrices directly (Strassen base case).

    The original def had two parameters with the same name (a SyntaxError);
    named per its call site in ``actual_strassen``.
    """
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("""Matrices are not 2x2""")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a, matrix_b):
    """Element-wise sum of two equal-shape matrices."""
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def matrix_subtraction(matrix_a, matrix_b):
    """Element-wise difference (matrix_a - matrix_b) of equal-shape matrices."""
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a):
    """Split an even-dimensioned square matrix into its four quadrants.

    Returns (top_left, top_right, bot_left, bot_right); raises for odd
    dimensions.
    """
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("""Odd matrices are not supported!""")
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix):
    """Return (rows, cols) of *matrix*; named per its many call sites."""
    return len(matrix), len(matrix[0])
def print_matrix(matrix):
    """Pretty-print *matrix*, one row per line."""
    print("""\n""".join(str(line) for line in matrix))
def actual_strassen(matrix_a, matrix_b):
    """Recursive Strassen multiplication of two power-of-two square matrices.

    Reconstructed: the original declared both parameters with the same name
    (a SyntaxError) and every intermediate product was assigned to one
    mangled variable.
    """
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)
    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)
    # The seven Strassen products.
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(top_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrixa: list, matrixb: list) -> list:
    """Multiply `matrixa` by `matrixb` with Strassen's algorithm.

    The inputs are zero-padded up to the next power-of-two square size so the
    recursive quadrant splits always work, then the padding rows/columns are
    stripped from the result.

    Raises:
        Exception: if the inner dimensions do not match.

    Fix: both dimension tuples had been collapsed to one name (`dimensiona`)
    and both padded matrices to `new_matrixa`, so the padding/stripping loops
    operated on the wrong operands; restored distinct names.
    """
    import math  # local import: no module-level `import math` is visible in this chunk

    if matrix_dimensions(matrixa)[1] != matrix_dimensions(matrixb)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrixa}\n"
            f"Matrix B: {matrixb}"
        )
        raise Exception(msg)
    dimensiona = matrix_dimensions(matrixa)
    dimensionb = matrix_dimensions(matrixb)

    # NOTE(review): the original condition was mangled (all operands were the
    # same variable); this degenerate early-return mirrors the upstream
    # algorithm-collection source — confirm against callers.
    if dimensiona[0] == dimensionb[1] and dimensionb[0] == dimensiona[1]:
        return [matrixa, matrixb]

    maximum = max(*dimensiona, *dimensionb)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrixa = matrixa
    new_matrixb = matrixb

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimensiona[0]:
            for _ in range(dimensiona[1], maxim):
                new_matrixa[i].append(0)
        else:
            new_matrixa.append([0] * maxim)
        if i < dimensionb[0]:
            for _ in range(dimensionb[1], maxim):
                new_matrixb[i].append(0)
        else:
            new_matrixb.append([0] * maxim)

    final_matrix = actual_strassen(new_matrixa, new_matrixb)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimensiona[0]:
            for _ in range(dimensionb[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    # Demo: multiply a 10x4 matrix by a 4x4 matrix.
    # Fix: the operands had been assigned to the mangled name
    # `lowerCamelCase__` (twice), and the call passed the first operand for
    # both arguments; restored distinct names and the intended call.
    matrixa = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrixb = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrixa, matrixb))
| 612 |
"""simple docstring"""
def create_ngram(sentence: str, ngram_size: int) -> list:
    """Return all contiguous character n-grams of length `ngram_size`.

    Fix: both parameters were mangled to the same name `lowercase__`
    (a SyntaxError), while the body referenced `ngram_size`; restored
    distinct parameter names.
    """
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 281 | 0 |
"""Search Google for the CLI arguments and open the first few result links."""
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup  # fix: was `from bsa import ...` (nonexistent module)
from fake_useragent import UserAgent

if __name__ == "__main__":
    # Fix: every variable below had been assigned to the mangled name `_a`
    # while later lines referenced `url`, `res`, `soup` and `links`.
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
| 721 |
"""simple docstring"""
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    """Query the GitHub API for self-hosted runner status and fail if any
    of `target_runners` is offline.

    Offline runners are also written to `offline_runners.txt` so they can be
    reported on Slack.

    Fix: the def name was mangled (the guard below calls
    `get_runner_status`) and both parameters were named `a`
    (a SyntaxError) while the body used `target_runners`/`token`.
    """
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":

    def list_str(values):
        """Parse a comma-separated CLI string into a list of strings."""
        return values.split(",")

    # Fix: the parser/args had been assigned to the mangled name `_a` while
    # later lines referenced `parser` and `args`; the helper above is
    # referenced as `list_str` by the argument definition.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )

    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
| 87 | 0 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    """Assert the gradients of `model_a` and `model_b` are in sync iff
    `did_step` is True.

    Fix: all four parameters were named `__A` (a SyntaxError) while the
    body used `did_step`/`iteration`; call sites in this file use
    `check_model_parameters`.
    """
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=False):
    """Run one MSE forward/backward step on `model`.

    When `do_backward` is False the loss is scaled by the accumulation-step
    count and `.backward()` is called directly (the "ground truth" path);
    otherwise the backward pass is delegated to `accelerator.backward`.

    Fix: all parameters were named `__A` (a SyntaxError) and the locals
    `output`/`loss` were mangled.  NOTE(review): the mangled source showed the
    flag default as `True`; restored `False` to match the upstream accelerate
    test harness — confirm against launch configs.
    """
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    """Return everything needed for a basic regression training run.

    With `sched=False`: `(model, ddp_model, dataloader)`.
    With `sched=True`: additionally per-model AdamW optimizers and LambdaLR
    schedulers, with the DDP-side objects passed through `accelerator.prepare`.

    Fix: parameters and every local were mangled (`__A`, `snake_case`);
    restored names from the call sites and usage in this file.
    """
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    """On a single process `no_sync` is a noop, so grads must always match.

    Fix: the def name was mangled to `lowercase` (shadowed by sibling defs)
    while `main` calls `test_noop_sync`; locals restored from usage.
    """
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    """In distributed setups `no_sync` must actually skip gradient sync, so
    grads diverge on even iterations and re-align on odd ones.

    Fix: mangled def name / duplicate `__A` parameter; names restored from
    the `main` call site and body usage.
    """
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    """`accelerator.accumulate` must keep grads out of sync until every
    second step (or the final batch).

    Fix: duplicate `__A` parameters (SyntaxError) and mangled locals;
    restored from the `main` call site and body usage.
    """
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    """`accumulate` must also gate optimizer/scheduler stepping so learning
    rates stay aligned between the manual and the wrapped training loop.

    Fix: duplicate `__A` parameters (SyntaxError) and mangled locals;
    restored from the `main` call site and body usage.
    """
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    """`gradient_state.active_dataloader` must track whichever prepared
    dataloader is currently being iterated, including a nested inner loop,
    and `end_of_dataloader` must flip on the last batch of each.

    Fix: mangled def name (shadowed) and locals; the two dataloaders had
    been collapsed into ambiguous `snake_case` names.
    """
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)

    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    """Run the sync/accumulation test suite appropriate for the current
    distributed configuration.

    Fix: the def name was mangled to `lowercase` while the
    `if __name__ == "__main__"` guard calls `main()`.
    """
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    """Per-process entry point for spawned launchers; the index is unused.

    NOTE(review): name restored from the conventional accelerate/transformers
    spawn hook — confirm against the launcher config.
    """
    main()
# Script entry point: run the full sync/accumulation test suite directly.
if __name__ == "__main__":
    main()
| 36 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

# Registry of available formatter classes, filled in by `_register_formatter` below.
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
# Maps every alias (e.g. "np") to its canonical format type (e.g. "numpy").
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
# Maps aliases of formatters whose backend is not installed to the error to raise.
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}

# Fix: all four module globals had been assigned to the single mangled name
# `A`, while the functions below reference `logger`, `_FORMAT_TYPES`,
# `_FORMAT_TYPES_ALIASES` and `_FORMAT_TYPES_ALIASES_UNAVAILABLE`.
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter subclass under `format_type` and its aliases,
    warning when an existing registration is overwritten.

    Fix: all three parameters shared the mangled name `_lowerCAmelCase`
    (a SyntaxError) and the `aliases or []` normalization was assigned to a
    throwaway name; the registration calls below use `_register_formatter`.
    """
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(
    unavailable_error: Exception,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Record the error to raise when a formatter whose backend is not
    installed is requested under `format_type` or any of its aliases.

    Fix: duplicate mangled parameter names (SyntaxError); the registration
    calls below use `_register_unavailable_formatter`.
    """
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

# Fix: the error objects below had been assigned to the mangled name `A`
# while the registration calls reference `_torch_error`/`_tf_error`/`_jax_error`.
if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """Resolve a format-type alias (e.g. "np") to its canonical name
    (e.g. "numpy"); unknown values pass through unchanged.

    Fix: def name was mangled (three sibling defs all named
    `_lowerCAmelCase`); `get_formatter` below calls this by name.
    """
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Instantiate the Formatter registered for `format_type` (alias-aware).

    Raises:
        The recorded backend-missing error if the type is known but its
        backend is unavailable, or ValueError for an unknown type.

    Fix: the positional parameter and the kwargs shared one mangled name
    (a SyntaxError); restored per body usage.
    """
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'"
        )
| 371 | 0 |
"""simple docstring"""
from datetime import datetime as dt
import os
from github import Github
# Issues carrying any of these labels are never auto-closed or auto-staled.
# Fix: the constant had been assigned to the mangled name `lowerCAmelCase__`
# while `main` below references `LABELS_TO_EXEMPT`.
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]
def main():
    """Close or stale-comment inactive open issues on huggingface/transformers.

    Close: last comment is by the bot, >7 days idle, >=30 days old, no
    exempt label.  Stale-comment: >23 days idle, >=30 days old, no exempt label.

    Fix: the def name was mangled to `a_` while the guard calls `main()`;
    the sort key lambda had a mangled parameter (`lowerCamelCase`) but its
    body used `i`, and `reverse=` had lost its `True`.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first.
        comments = sorted(
            [comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True
        )
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
# Script entry point: sweep the repository's open issues once.
if __name__ == "__main__":
    main()
| 632 | """simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
lowerCAmelCase__ : Optional[int] = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    """Placeholder for fast pipeline tests (none implemented in this chunk).

    Fix: both test classes in this file were mangled to the same name
    `snake_case`, so the second silently shadowed the first; restored a
    distinct, descriptive name.
    """

    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test for the image-variation Versatile Diffusion pipeline."""

    def test_inference_image_variations(self):
        # Fix: the device, progress-bar flag, prompt image and generator had
        # all been mangled to `lowerCamelCase__`; restored from the imported
        # helpers (`torch_device`, `load_image`) and standard pipeline usage.
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 632 | 1 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def UpperCAmelCase_ ( __lowerCamelCase : Dict ,__lowerCamelCase : bool = True ,__lowerCamelCase : float = math.inf ,__lowerCamelCase : float = -math.inf ,__lowerCamelCase : float = math.inf ,__lowerCamelCase : float = -math.inf ,__lowerCamelCase : bool = False ,__lowerCamelCase : float = 1_00 ,__lowerCamelCase : float = 0.01 ,__lowerCamelCase : float = 1 ,):
lowercase_ :Optional[Any] = False
lowercase_ :Dict = search_prob
lowercase_ :Optional[int] = start_temperate
lowercase_ :Any = []
lowercase_ :Dict = 0
lowercase_ :List[Any] = None
while not search_end:
lowercase_ :Union[str, Any] = current_state.score()
if best_state is None or current_score > best_state.score():
lowercase_ :Optional[Any] = current_state
scores.append(__lowerCamelCase )
iterations += 1
lowercase_ :Dict = None
lowercase_ :List[Any] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
lowercase_ :Any = random.randint(0 ,len(__lowerCamelCase ) - 1 ) # picking a random neighbor
lowercase_ :Optional[int] = neighbors.pop(__lowerCamelCase )
lowercase_ :Dict = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
lowercase_ :Tuple = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
lowercase_ :Tuple = picked_neighbor
else:
lowercase_ :Optional[int] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
lowercase_ :Union[str, Any] = picked_neighbor
lowercase_ :Dict = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
lowercase_ :int = True
else:
lowercase_ :List[str] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(__lowerCamelCase ) ,__lowerCamelCase )
plt.xlabel("Iterations" )
plt.ylabel("Function values" )
plt.show()
return best_state
if __name__ == "__main__":
    # Fix: both demo objective functions had been defined under one mangled
    # name (`UpperCAmelCase_`) while call sites referenced `test_fa`, and the
    # problem/result variables were never bound to the `prob`/`local_min`
    # names they are read as; restored distinct names throughout.

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )
| 172 |
'''simple docstring'''
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Convert an (H, W, 3) RGB image to grayscale with luma weights.

    Fix: the three channel slices had been unpacked into one mangled name
    while the return expression referenced `r`, `g` and `b`; the main guard
    calls this as `rgb_to_gray`.
    """
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image: True where 127 < value <= 255.

    Fix: def name mangled; the main guard calls this as `gray_to_binary`.
    """
    return (gray > 127) & (gray <= 255)
def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Morphological dilation: each output pixel is 1 when the kernel
    overlaps any set pixel of the (zero-padded) binary input.

    Fix: duplicate mangled parameter names (SyntaxError), and the line
    copying the image into the center of the padded buffer had lost its
    subscript, so the padding never happened; restored the centered copy.
    """
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )

    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
if __name__ == "__main__":
    # Fix: every variable below had been assigned to the mangled name
    # `lowerCAmelCase` while later lines read `lena_path`, `structuring_element`,
    # `output` and `pil_img`.
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
| 172 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import wiring for the Autoformer model (standard transformers pattern).
# Fix: the structure dict and module replacement had been assigned to the
# mangled name `__SCREAMING_SNAKE_CASE`, so `_import_structure` was undefined
# and the torch-only symbols were never attached to it; also restored the
# `sys.modules[__name__]` replacement with the `_LazyModule`.
_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 395 |
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    """Reset the already-emitted deprecation warnings so every test sees one.

    Fix: the fixture parameter was mangled while the body used `monkeypatch`,
    and the fixture itself needed a real name for the test below to request it.
    """
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


@pytest.fixture
def mock_hfh(monkeypatch):
    """Patch `datasets.inspect.huggingface_hub` with a mock exposing a fixed
    list of metric ids (used by `list_metrics`)."""

    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    # Fix: five parameters all shared the mangled name `__lowercase`
    # (a SyntaxError); restored from body usage and the fixtures above.
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 395 | 1 |
"""Download the Open Graph (og:image) preview image of a web page to disk."""
from datetime import datetime

import requests
from bs4 import BeautifulSoup  # fix: was `from bsa import ...` (nonexistent module)

if __name__ == "__main__":
    # Fix: every variable below had been assigned to the mangled name
    # `lowerCamelCase` while later lines referenced `url`, `soup`,
    # `image_url`, `image_data` and `file_name`.
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 82 |
# Digit pools used while building candidate reversible numbers.
# Fix: both constants had been assigned to mangled names while the code
# below references EVEN_DIGITS / ODD_DIGITS.
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(
    remaining_length: int, remainder: int, digits: list, length: int
) -> int:
    """Count length-`length` reversible numbers consistent with the digit
    slots already fixed in `digits` (Project Euler 145 helper).

    A number n is reversible when n + reverse(n) has only odd digits; digits
    are chosen outside-in in opposite-parity pairs so every column sum is odd.

    Fix: the digit-slot assignments had lost their `digits[...]` subscripts
    (each was reduced to a bare mangled assignment), so the recursion never
    recorded the chosen digits; restored the outside-in slot indices.
    """
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    """Count reversible numbers below 10**max_power (Project Euler 145)."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
| 457 | 0 |
"""Look up the citation count of a paper on Google Scholar."""
import requests
from bs4 import BeautifulSoup  # fixed: the package is `bs4`, not `bsa`


def get_citation(base_url: str, params: dict) -> str:
    """Return the "Cited by N" text for the first Google Scholar result.

    :param base_url: the scholar_lookup endpoint URL.
    :param params: query parameters identifying the paper (title, journal, ...).
    :return: the text of the third anchor in the result footer, which
        Google Scholar uses for the "Cited by N" link.
    """
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    # First search result block.
    div = soup.find("div", attrs={"class": "gs_ri"})
    # Footer links of that result: [save], [cite], [cited by], ...
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 281 |
"""Sort a sequence via a binary search tree (tree sort).

NOTE(review): duplicates are silently dropped (an equal value just
re-assigns the node's value), and the `if self.val:` guard treats a node
holding 0 as "empty" — both quirks are kept from the original algorithm.
"""


class Node:
    """A binary-search-tree node."""

    def __init__(self, val):
        self.val = val    # value stored at this node
        self.left = None  # subtree with values < val
        self.right = None # subtree with values > val

    def insert(self, val):
        """Insert ``val`` into the subtree rooted at this node."""
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
            else:
                # Equal value: overwrite in place, i.e. drop the duplicate.
                self.val = val
        else:
            self.val = val


def inorder(root, res):
    """Append the values of the tree rooted at ``root`` to ``res`` in order."""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """Return a sorted list of ``arr``'s values (duplicates removed).

    An empty input is returned unchanged.
    """
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 281 | 1 |
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
# Credentials and endpoints for the Hugging Face Hub CI staging instance,
# used by the fixtures below. NOTE(review): the token is a throwaway CI
# token for the staging hub, not a real secret.
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
# Template consumed by huggingface_hub.file_download; restored the
# `{filename}` placeholder that had been clobbered in this copy.
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    """Point huggingface_hub's file-download URL template at the CI hub."""
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    """Point `datasets` at the CI hub endpoint instead of the public hub."""
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    """Store/read the hub token from a CI-specific path, not ~/.huggingface/token."""
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    """Save the CI user token for the duration of a test, then delete it."""
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()
@pytest.fixture(scope="session")
def hf_api():
    """Session-wide HfApi client bound to the CI hub endpoint."""
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
    """Install the CI user token for the session, restoring any prior token."""
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)
@pytest.fixture
def cleanup_repo(hf_api):
    """Return a callable that deletes a dataset repo on the CI hub."""

    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    """Return a context manager that deletes ``repo_id`` on exit."""

    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    """Create a session-scoped private dataset repo holding one text file."""
    # Timestamp suffix keeps concurrent CI runs from colliding on the name.
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    """Function-scoped wrapper that also re-points datasets/hub URLs at CI."""
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    """Create a session-scoped private dataset repo holding a zipped CSV."""
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
    hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    """Function-scoped wrapper that also re-points datasets/hub URLs at CI."""
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    """Create a session-scoped private dataset repo holding zipped images."""
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
    hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    """Function-scoped wrapper that also re-points datasets/hub URLs at CI."""
    return hf_private_dataset_repo_zipped_img_data_
| 310 |
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    """Resize images so the shortest edge has a given length.

    The aspect ratio is preserved; if the resulting longest edge would
    exceed ``max_size``, the image is scaled down to fit.
    """

    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list[int]): [min, max] range from which the
                target shortest-edge length is sampled uniformly.
            max_size (int): maximum allowed size of the longest edge.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        """Resize each image in ``imgs``; HWC uint8 arrays go through PIL,
        float images through torch's bilinear interpolation."""
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                # Longest edge capped at max_size; rescale both dims.
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)
        return img_augs
class Preprocess:
    """Turn raw images into the batched, normalized, padded tensors expected
    by the FRCNN backbone, tracking per-image sizes and rescale factors."""

    def __init__(self, cfg):
        # assumes cfg exposes detectron2-style INPUT/MODEL sections — TODO confirm
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        """Zero-pad all images to the max H/W in the batch and stack them.

        Returns (stacked images, tensor of original (h, w) sizes).
        """
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        """Preprocess one image or a list; returns (images, sizes, scales_yx)."""
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    # Path/array input: tensorize with the configured format first.
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # scale factors to map padded/resized coords back to raw coords
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
assert torch.isfinite(lowerCAmelCase_ ).all(), "Box tensor contains infinite or NaN!"
lowercase , lowercase = box_size
tensor[:, 0].clamp_(min=0 , max=lowerCAmelCase_ )
tensor[:, 1].clamp_(min=0 , max=lowerCAmelCase_ )
tensor[:, 2].clamp_(min=0 , max=lowerCAmelCase_ )
tensor[:, 3].clamp_(min=0 , max=lowerCAmelCase_ )
| 310 | 1 |
"""Solve the N-queens problem with backtracking.

Places N non-attacking queens on an N x N board and records every
solution in the module-level ``solution`` list.
"""
from __future__ import annotations

# Snapshots of every solved board (0/1 grids) are appended here.
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if a queen at (row, column) is not attacked.

    Only the row, the column and the two upper diagonals need checking,
    since rows below ``row`` have not been filled yet.
    """
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # Upper-left diagonal.
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # Upper-right diagonal.
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row, recording and printing every full solution."""
    if row >= len(board):
        # Append a copy: the backtracking below zeroes `board` again, so
        # storing the live reference would leave only empty boards.
        solution.append([line[:] for line in board])
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """Pretty-print the board: 'Q' marks a queen, '.' an empty square."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


if __name__ == "__main__":
    # Guarded so that importing this module no longer solves (and prints)
    # the full 8-queens problem as an import side effect.
    n = 8
    board = [[0 for i in range(n)] for j in range(n)]
    solve(board, 0)
    print("The total no. of solutions are :", len(solution))
| 194 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of released checkpoints to their hosted config files.
MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class MarkupLMConfig(PretrainedConfig):
    """Configuration for a MarkupLM model.

    Holds the BERT-style transformer hyperparameters plus the extra XPath
    embedding settings MarkupLM uses to encode HTML tree positions.
    Defaults match the microsoft/markuplm-base checkpoint.
    """

    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1_024,
        tag_pad_id=216,
        subs_pad_id=1_001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties (XPath tree-position embeddings)
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 194 | 1 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger used by load_orig_config_file below.
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    """Load the original MobileViTV2 YAML config into a flat Namespace.

    Nested YAML mappings are flattened to dotted keys
    (e.g. ``model.classification.name``) set as attributes on the result.

    :param orig_cfg_file: path to the original YAML configuration file.
    :return: ``argparse.Namespace`` with one attribute per flattened key.
    """
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        # Depth-first flattening of nested mappings into dotted keys.
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    """Build a MobileViTV2 HF config for a given task checkpoint.

    :param task_name: task identifier (e.g. "imagenet1k_256", "ade20k_deeplabv3")
        that determines label count, image size and segmentation settings.
    :param orig_cfg_file: path to the original YAML config of the checkpoint.
    :return: a populated ``MobileViTVaConfig``.
    """
    config = MobileViTVaConfig()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label: fetched from the shared huggingface/label-files dataset repo
    repo_id = "huggingface/label-files"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}

    return config
def rename_key(dct, old, new):
    """Move the value stored under key ``old`` to key ``new`` in ``dct``.

    :param dct: mutable mapping (a model state dict) modified in place.
    :param old: key to remove.
    :param new: key under which the value is re-inserted.
    """
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    """Map original MobileViTV2 checkpoint keys to HF model keys.

    :param state_dict: original checkpoint state dict (only its keys are read).
    :param base_model: if True, keys target a bare backbone (no
        "mobilevitv2." prefix); otherwise the task-model prefix is added.
    :return: list of (old_key, new_key) pairs, one per key in ``state_dict``.
    """
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        # Generic sub-module renames applied to every key.
        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        # Stem and the two pure-convolution stages.
        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        # MobileViT stages 3-5: downsampling + local-rep convolutions.
        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        # Transformer blocks per stage; the entry after the last block
        # (index j+1) is the final layernorm of that stage.
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        # Attention / feed-forward internals.
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        # Segmentation head (DeepLabV3) renames.
        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Drop the auxiliary segmentation head from ``state_dict`` in place.

    The HF MobileViTV2 model has no aux head, so every
    ``seg_head.aux_head.*`` entry of the original checkpoint is removed.
    """
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    """Download and return the standard COCO cats test image (PIL Image)."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """Convert an original MobileViTV2 checkpoint to the HF format.

    Loads the original weights, renames/removes keys to match the HF model,
    sanity-checks the logits for the base ImageNet variant, and saves the
    model plus image processor to ``pytorch_dump_folder_path``.
    """
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.idalabel[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse the task name, original checkpoint/config paths
    # and the output directory, then run the conversion.
    UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--task''',
        default='''imagenet1k_256''',
        type=str,
        help=(
            '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
            '''\n    Classification (ImageNet-1k)\n        - MobileViTV2 (256x256) : imagenet1k_256\n        - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n        - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n          imagenet21k_to_1k_256\n        - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n          ImageNet-1k 384x384) : imagenet21k_to_1k_384\n    Segmentation\n        - ADE20K Dataset : ade20k_deeplabv3\n        - Pascal VOC 2012 Dataset: voc_deeplabv3\n    '''
        ),
        choices=[
            '''imagenet1k_256''',
            '''imagenet1k_384''',
            '''imagenet21k_to_1k_256''',
            '''imagenet21k_to_1k_384''',
            '''ade20k_deeplabv3''',
            '''voc_deeplabv3''',
        ],
    )
    parser.add_argument(
        '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
    )
    parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
    )

    # NOTE(review): `parser` here presumably refers to the ArgumentParser
    # created above under an obfuscated name — confirm the binding.
    UpperCamelCase__ : Dict = parser.parse_args()
    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
| 105 |
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count right-triangle solutions per perimeter up to ``max_perimeter``.

    For every integer-sided right triangle with perimeter p <= max_perimeter,
    increments ``triplets[p]``. Legs are enumerated with
    perpendicular >= base so each triangle is counted once.

    :param max_perimeter: largest perimeter (and leg length) considered.
    :return: Counter mapping perimeter -> number of distinct triangles.
    """
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    """Return the perimeter <= ``max_perimeter`` with the most right-triangle
    solutions (Project Euler problem 39; the answer for 1000 is 840)."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    """Builds small DeiT configs/inputs and shape-checks the model heads.

    Used by the unittest-based model tester below; ``parent`` is the test
    case instance whose assert helpers are called.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for one forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        """Return a small DeiTConfig built from the tester's hyperparameters."""
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check the base model's last_hidden_state shape."""
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        """Check reconstruction shapes for RGB and greyscale inputs."""
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Check classification logits shapes for RGB and greyscale inputs."""
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the format the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class a__( _A , _A , unittest.TestCase ):
    """Common + pipeline tests for the DeiT model family.

    Fixes applied:
    - every class attribute was named ``_a`` (each assignment shadowed the
      previous one, so only the last survived) — restored the attribute names
      that ``ModelTesterMixin`` reads (``all_model_classes`` etc.);
    - every method was named ``lowercase_`` (each ``def`` shadowed the
      previous one and unittest discovered none of them) — restored
      ``test_*`` / ``setUp`` / ``_prepare_for_class`` names;
    - undefined ``__lowerCamelCase`` placeholders replaced by the values the
      surrounding code requires.
    NOTE(review): the base list repeats the placeholder ``_A`` — a duplicate
    base class raises TypeError at class-creation time; the two bases are
    presumably distinct mixins (model-tester and pipeline-tester) mangled to
    the same alias — confirm against the file's imports.
    NOTE(review): ``MODEL_MAPPING``, ``MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING``
    and ``MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING`` must be imported from
    ``transformers`` at the top of the file — verify the import block.
    """

    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        # model_tester / config_tester are read by the mixin test methods.
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='DeiT does not use inputs_embeds' )
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        # DeiTForImageClassificationWithTeacher is inference-only: it accepts
        # no labels, so drop them before the common tests call the model.
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                # NOTE: quote style inside the f-string changed from the
                # original's nested double quotes, which are a SyntaxError
                # before Python 3.12.
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type['num_labels'] )

                    inputs["labels"] = inputs["labels"].to(problem_type['dtype'] )

                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"""Something is going wrong in the regression problem: intercepted {w.message}""" )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO fixture image used by the integration tests below.

    Fix: renamed from the placeholder ``lowerCamelCase__`` — the integration
    tests call ``prepare_img()``.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class a__( unittest.TestCase ):
@cached_property
def lowercase_ ( self : Tuple ):
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def lowercase_ ( self : Any ):
a : Optional[int] = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to(
__lowerCamelCase )
a : List[Any] = self.default_image_processor
a : List[Any] = prepare_img()
a : Optional[Any] = image_processor(images=__lowerCamelCase , return_tensors='pt' ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
a : List[str] = model(**__lowerCamelCase )
# verify the logits
a : str = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
a : List[str] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def lowercase_ ( self : Union[str, Any] ):
a : str = DeiTModel.from_pretrained(
'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.floataa , device_map='auto' )
a : Dict = self.default_image_processor
a : Optional[int] = prepare_img()
a : Union[str, Any] = image_processor(images=__lowerCamelCase , return_tensors='pt' )
a : Union[str, Any] = inputs.pixel_values.to(__lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
a : List[str] = model(__lowerCamelCase ) | 717 |
'''simple docstring'''
def lowerCamelCase__ ( _A = 6008_5147_5143 ):
try:
a : Optional[int] = int(_A )
except (TypeError, ValueError):
raise TypeError('Parameter n must be int or castable to int.' )
if n <= 0:
raise ValueError('Parameter n must be greater than or equal to one.' )
a : Any = 1
a : Union[str, Any] = 2
while i * i <= n:
while n % i == 0:
a : str = i
n //= i
i += 1
if n > 1:
a : Any = n
return int(_A )
if __name__ == "__main__":
print(F"{solution() = }") | 195 | 0 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class A_(SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
a_ : List[str] = """EncodecFeatureExtractor"""
a_ : List[Any] = ("""T5Tokenizer""", """T5TokenizerFast""")
def __init__( self , A , A ):
super().__init__(A , A )
_lowerCamelCase : Optional[Any] = self.feature_extractor
_lowerCamelCase : str = False
def _lowerCAmelCase ( self , A=None , A=None , A=True ):
return self.tokenizer.get_decoder_prompt_ids(task=A , language=A , no_timestamps=A )
def __call__( self , *A , **A ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*A , **A )
_lowerCamelCase : List[Any] = kwargs.pop('audio' , A )
_lowerCamelCase : int = kwargs.pop('sampling_rate' , A )
_lowerCamelCase : Any = kwargs.pop('text' , A )
if len(A ) > 0:
_lowerCamelCase : List[Any] = args[0]
_lowerCamelCase : int = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if text is not None:
_lowerCamelCase : Tuple = self.tokenizer(A , **A )
if audio is not None:
_lowerCamelCase : List[Any] = self.feature_extractor(A , *A , sampling_rate=A , **A )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
_lowerCamelCase : Optional[int] = audio_inputs['input_values']
if "padding_mask" in audio_inputs:
_lowerCamelCase : Dict = audio_inputs['padding_mask']
return inputs
def _lowerCAmelCase ( self , *A , **A ):
_lowerCamelCase : str = kwargs.pop('audio' , A )
_lowerCamelCase : str = kwargs.pop('padding_mask' , A )
if len(A ) > 0:
_lowerCamelCase : List[Any] = args[0]
_lowerCamelCase : Dict = args[1:]
if audio_values is not None:
return self._decode_audio(A , padding_mask=A )
else:
return self.tokenizer.batch_decode(*A , **A )
def _lowerCAmelCase ( self , *A , **A ):
return self.tokenizer.decode(*A , **A )
def _lowerCAmelCase ( self , A , A = None ):
_lowerCamelCase : Dict = to_numpy(A )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = audio_values.shape
if padding_mask is None:
return list(A )
_lowerCamelCase : Dict = to_numpy(A )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
_lowerCamelCase : Union[str, Any] = seq_len - padding_mask.shape[-1]
_lowerCamelCase : int = 1 - self.feature_extractor.padding_value
_lowerCamelCase : Union[str, Any] = np.pad(A , ((0, 0), (0, difference)) , 'constant' , constant_values=A )
_lowerCamelCase : Tuple = audio_values.tolist()
for i in range(A ):
_lowerCamelCase : Tuple = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
_lowerCamelCase : List[Any] = sliced_audio.reshape(A , -1 )
return audio_values
| 437 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class A_(SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
a_ : Dict = TextToVideoSDPipeline
a_ : Dict = TEXT_TO_IMAGE_PARAMS
a_ : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
a_ : str = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def _lowerCAmelCase ( self ):
torch.manual_seed(0 )
_lowerCamelCase : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , )
_lowerCamelCase : Optional[int] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=A , set_alpha_to_one=A , )
torch.manual_seed(0 )
_lowerCamelCase : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_lowerCamelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
_lowerCamelCase : str = CLIPTextModel(A )
_lowerCamelCase : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowerCamelCase : Any = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def _lowerCAmelCase ( self , A , A=0 ):
if str(A ).startswith('mps' ):
_lowerCamelCase : Tuple = torch.manual_seed(A )
else:
_lowerCamelCase : Optional[int] = torch.Generator(device=A ).manual_seed(A )
_lowerCamelCase : Optional[int] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def _lowerCAmelCase ( self ):
_lowerCamelCase : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase : Tuple = self.get_dummy_components()
_lowerCamelCase : Dict = TextToVideoSDPipeline(**A )
_lowerCamelCase : Optional[int] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
_lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(A )
_lowerCamelCase : Union[str, Any] = 'np'
_lowerCamelCase : Optional[int] = sd_pipe(**A ).frames
_lowerCamelCase : Dict = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
_lowerCamelCase : Tuple = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCAmelCase ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A , expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowerCAmelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A , expected_max_diff=1E-2 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def _lowerCAmelCase ( self ):
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def _lowerCAmelCase ( self ):
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def _lowerCAmelCase ( self ):
pass
def _lowerCAmelCase ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class A_(unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
_lowerCamelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' )
_lowerCamelCase : Dict = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
_lowerCamelCase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
_lowerCamelCase : Tuple = pipe.to('cuda' )
_lowerCamelCase : str = 'Spiderman is surfing'
_lowerCamelCase : Any = torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCamelCase : Union[str, Any] = pipe(A , generator=A , num_inference_steps=25 , output_type='pt' ).frames
_lowerCamelCase : Any = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def _lowerCAmelCase ( self ):
_lowerCamelCase : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' )
_lowerCamelCase : int = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
_lowerCamelCase : Optional[Any] = pipe.to('cuda' )
_lowerCamelCase : Tuple = 'Spiderman is surfing'
_lowerCamelCase : Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCamelCase : Tuple = pipe(A , generator=A , num_inference_steps=2 , output_type='pt' ).frames
_lowerCamelCase : Optional[int] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
| 437 | 1 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
__lowerCAmelCase = False
__lowerCAmelCase = True
__lowerCAmelCase = False
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--repo_path''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = {
'''image_size''': '''sample_size''',
'''num_res_blocks''': '''layers_per_block''',
'''block_channels''': '''block_out_channels''',
'''down_blocks''': '''down_block_types''',
'''up_blocks''': '''up_block_types''',
'''downscale_freq_shift''': '''freq_shift''',
'''resnet_num_groups''': '''norm_num_groups''',
'''resnet_act_fn''': '''act_fn''',
'''resnet_eps''': '''norm_eps''',
'''num_head_channels''': '''attention_head_dim''',
}
__lowerCAmelCase = {
'''time_steps''': '''time_proj''',
'''mid''': '''mid_block''',
'''downsample_blocks''': '''down_blocks''',
'''upsample_blocks''': '''up_blocks''',
}
__lowerCAmelCase = '''''' if has_file(args.repo_path, '''config.json''') else '''unet'''
with open(os.path.join(args.repo_path, subfolder, '''config.json'''), '''r''', encoding='''utf-8''') as reader:
__lowerCAmelCase = reader.read()
__lowerCAmelCase = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, '''config.json'''):
__lowerCAmelCase = UNetaDModel(**config)
else:
__lowerCAmelCase = UNetaDConditionModel if '''ldm-text2im-large-256''' in args.repo_path else UNetaDModel
__lowerCAmelCase = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
__lowerCAmelCase = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
__lowerCAmelCase = config[key]
del config[key]
__lowerCAmelCase = [k.replace('''UNetRes''', '''''') for k in config['''down_block_types''']]
__lowerCAmelCase = [k.replace('''UNetRes''', '''''') for k in config['''up_block_types''']]
if do_only_weights:
__lowerCAmelCase = torch.load(os.path.join(args.repo_path, subfolder, '''diffusion_pytorch_model.bin'''))
__lowerCAmelCase = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('''.op.bias''') or param_key.endswith('''.op.weight'''):
continue
__lowerCAmelCase = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('''.''')[0] == key:
__lowerCAmelCase = param_value
__lowerCAmelCase = True
if not has_changed:
__lowerCAmelCase = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder)) | 708 |
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class _lowerCAmelCase ( nn.Module ):
__lowerCAmelCase : int
__lowerCAmelCase : int
__lowerCAmelCase : float = 0.0
__lowerCAmelCase : int = 1
__lowerCAmelCase : int = 1
__lowerCAmelCase : bool = True
__lowerCAmelCase : bool = False
__lowerCAmelCase : bool = False
__lowerCAmelCase : bool = False
__lowerCAmelCase : jnp.dtype = jnp.floataa
def _lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase = []
lowercase = []
for i in range(self.num_layers ):
lowercase = self.in_channels if i == 0 else self.out_channels
lowercase = FlaxResnetBlockaD(
in_channels=a , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(a )
lowercase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(a )
lowercase = resnets
lowercase = attentions
if self.add_downsample:
lowercase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : List[str] , a : Tuple , a : Union[str, Any] , a : Tuple , a : str=True ) -> Tuple:
"""simple docstring"""
lowercase = ()
for resnet, attn in zip(self.resnets , self.attentions ):
lowercase = resnet(a , a , deterministic=a )
lowercase = attn(a , a , deterministic=a )
output_states += (hidden_states,)
if self.add_downsample:
lowercase = self.downsamplers_a(a )
output_states += (hidden_states,)
return hidden_states, output_states
class _lowerCAmelCase ( nn.Module ):
__lowerCAmelCase : int
__lowerCAmelCase : int
__lowerCAmelCase : float = 0.0
__lowerCAmelCase : int = 1
__lowerCAmelCase : bool = True
__lowerCAmelCase : jnp.dtype = jnp.floataa
def _lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
lowercase = []
for i in range(self.num_layers ):
lowercase = self.in_channels if i == 0 else self.out_channels
lowercase = FlaxResnetBlockaD(
in_channels=a , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(a )
lowercase = resnets
if self.add_downsample:
lowercase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : int , a : Tuple , a : List[Any] , a : Optional[Any]=True ) -> Tuple:
"""simple docstring"""
lowercase = ()
for resnet in self.resnets:
lowercase = resnet(a , a , deterministic=a )
output_states += (hidden_states,)
if self.add_downsample:
lowercase = self.downsamplers_a(a )
output_states += (hidden_states,)
return hidden_states, output_states
class _lowerCAmelCase ( nn.Module ):
__lowerCAmelCase : int
__lowerCAmelCase : int
__lowerCAmelCase : int
__lowerCAmelCase : float = 0.0
__lowerCAmelCase : int = 1
__lowerCAmelCase : int = 1
__lowerCAmelCase : bool = True
__lowerCAmelCase : bool = False
__lowerCAmelCase : bool = False
__lowerCAmelCase : bool = False
__lowerCAmelCase : jnp.dtype = jnp.floataa
def _lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase = []
lowercase = []
for i in range(self.num_layers ):
lowercase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
lowercase = self.prev_output_channel if i == 0 else self.out_channels
lowercase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(a )
lowercase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(a )
lowercase = resnets
lowercase = attentions
if self.add_upsample:
lowercase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : str , a : Optional[int] , a : Optional[int] , a : Optional[int] , a : List[str] , a : Dict=True ) -> List[Any]:
"""simple docstring"""
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
lowercase = res_hidden_states_tuple[-1]
lowercase = res_hidden_states_tuple[:-1]
lowercase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
lowercase = resnet(a , a , deterministic=a )
lowercase = attn(a , a , deterministic=a )
if self.add_upsample:
lowercase = self.upsamplers_a(a )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
__lowerCAmelCase : int
__lowerCAmelCase : int
__lowerCAmelCase : int
__lowerCAmelCase : float = 0.0
__lowerCAmelCase : int = 1
__lowerCAmelCase : bool = True
__lowerCAmelCase : jnp.dtype = jnp.floataa
def _lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
lowercase = []
for i in range(self.num_layers ):
lowercase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
lowercase = self.prev_output_channel if i == 0 else self.out_channels
lowercase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(a )
lowercase = resnets
if self.add_upsample:
lowercase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : int , a : Any , a : Any , a : Tuple , a : Dict=True ) -> Optional[Any]:
"""simple docstring"""
for resnet in self.resnets:
# pop res hidden states
lowercase = res_hidden_states_tuple[-1]
lowercase = res_hidden_states_tuple[:-1]
lowercase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
lowercase = resnet(a , a , deterministic=a )
if self.add_upsample:
lowercase = self.upsamplers_a(a )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
__lowerCAmelCase : int
__lowerCAmelCase : float = 0.0
__lowerCAmelCase : int = 1
__lowerCAmelCase : int = 1
__lowerCAmelCase : bool = False
__lowerCAmelCase : bool = False
__lowerCAmelCase : jnp.dtype = jnp.floataa
def _lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
# there is always at least one resnet
lowercase = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
lowercase = []
for _ in range(self.num_layers ):
lowercase = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(a )
lowercase = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(a )
lowercase = resnets
lowercase = attentions
def __call__( self : List[Any] , a : Optional[int] , a : Tuple , a : List[Any] , a : List[str]=True ) -> Optional[Any]:
"""simple docstring"""
lowercase = self.resnets[0](a , a )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
lowercase = attn(a , a , deterministic=a )
lowercase = resnet(a , a , deterministic=a )
return hidden_states | 396 | 0 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __A ( self: Tuple , __A: Dict ) -> Optional[Any]:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
_A = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_lowercase )
def __A ( self: Any ) -> Optional[Any]:
_A = '''sshleifer/tiny-gpt2'''
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_lowercase , multi_process=_lowercase , )
_A = TensorFlowBenchmark(_lowercase )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self: str ) -> int:
_A = '''sgugger/tiny-distilbert-classification'''
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , only_pretrain_model=_lowercase , )
_A = TensorFlowBenchmark(_lowercase )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self: Optional[Any] ) -> str:
_A = '''sshleifer/tiny-gpt2'''
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
_A = TensorFlowBenchmark(_lowercase )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self: Optional[Any] ) -> Tuple:
_A = '''sshleifer/tiny-gpt2'''
_A = AutoConfig.from_pretrained(_lowercase )
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_lowercase , multi_process=_lowercase , )
_A = TensorFlowBenchmark(_lowercase , [config] )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self: Tuple ) -> Optional[Any]:
_A = '''sshleifer/tiny-gpt2'''
_A = AutoConfig.from_pretrained(_lowercase )
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
_A = TensorFlowBenchmark(_lowercase , [config] )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    def __A ( self: Dict ) -> str:
        """Training benchmark for tiny GPT-2 (checks train-time and
        train-memory result dicts).

        NOTE(review): collapsed locals — ``MODEL_ID``/``_lowercase``/
        ``benchmark``/``results`` are read but never bound here.
        """
        _A = '''sshleifer/tiny-gpt2'''
        _A = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
        _A = TensorFlowBenchmark(_lowercase )
        _A = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def __A ( self: Any ) -> Tuple:
        """Training benchmark for tiny GPT-2 with an explicit config.

        NOTE(review): collapsed locals — see the unbound ``_lowercase``/
        ``config``/``benchmark``/``results`` reads below.
        """
        _A = '''sshleifer/tiny-gpt2'''
        _A = AutoConfig.from_pretrained(_lowercase )
        _A = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
        _A = TensorFlowBenchmark(_lowercase , [config] )
        _A = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def __A ( self: Dict ) -> List[Any]:
        """Inference benchmark for a tiny random T5 (encoder-decoder path),
        passing the config via the ``configs=`` keyword.

        NOTE(review): collapsed locals — see the unbound ``_lowercase``/
        ``config``/``benchmark``/``results`` reads below.
        """
        _A = '''patrickvonplaten/t5-tiny-random'''
        _A = AutoConfig.from_pretrained(_lowercase )
        _A = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
        _A = TensorFlowBenchmark(_lowercase , configs=[config] )
        _A = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' )
    def __A ( self: List[str] ) -> Optional[Any]:
        """XLA-compiled inference benchmark for tiny GPT-2; skipped on
        CPU-only hosts (see decorator).

        NOTE(review): collapsed locals — ``MODEL_ID``/``_lowercase``/
        ``benchmark``/``results`` are read but never bound here.
        """
        _A = '''sshleifer/tiny-gpt2'''
        _A = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , use_xla=_lowercase , multi_process=_lowercase , )
        _A = TensorFlowBenchmark(_lowercase )
        _A = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def __A ( self: Union[str, Any] ) -> List[Any]:
        """Benchmark with CSV export: after the run, the inference-time,
        inference-memory and environment CSV files must exist in the temp dir.

        NOTE(review): collapsed locals — ``MODEL_ID``/``_lowercase``/
        ``benchmark`` are read but never bound; the ``os.path.join``
        first arguments were presumably ``tmp_dir``.
        """
        _A = '''sshleifer/tiny-gpt2'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            _A = TensorFlowBenchmarkArguments(
                models=[MODEL_ID] , inference=_lowercase , save_to_csv=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_lowercase , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(_lowercase , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(_lowercase , '''env.csv''' ) , multi_process=_lowercase , )
            _A = TensorFlowBenchmark(_lowercase )
            benchmark.run()
            self.assertTrue(Path(os.path.join(_lowercase , '''inf_time.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(_lowercase , '''inf_mem.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(_lowercase , '''env.csv''' ) ).exists() )
    def __A ( self: Union[str, Any] ) -> List[str]:
        """Benchmark with line-by-line memory tracing: the returned summary
        must expose sequential/cumulative/current/total fields and a log
        file must be written.

        NOTE(review): collapsed locals — the helper checks ``_lowercase``
        instead of its own argument, and ``MODEL_ID``/``benchmark``/
        ``result`` are read but never bound here.
        """
        _A = '''sshleifer/tiny-gpt2'''
        def _check_summary_is_not_empty(__A: Union[str, Any] ):
            # Each attribute is one view of the traced memory summary.
            self.assertTrue(hasattr(_lowercase , '''sequential''' ) )
            self.assertTrue(hasattr(_lowercase , '''cumulative''' ) )
            self.assertTrue(hasattr(_lowercase , '''current''' ) )
            self.assertTrue(hasattr(_lowercase , '''total''' ) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            _A = TensorFlowBenchmarkArguments(
                models=[MODEL_ID] , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_lowercase , '''log.txt''' ) , log_print=_lowercase , trace_memory_line_by_line=_lowercase , eager_mode=_lowercase , multi_process=_lowercase , )
            _A = TensorFlowBenchmark(_lowercase )
            _A = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            self.assertTrue(Path(os.path.join(_lowercase , '''log.txt''' ) ).exists() )
| 484 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class lowercase__ ( unittest.TestCase ):
    """SamProcessor tests with the PyTorch backend.

    NOTE(review): identifiers were mechanically collapsed — every local is
    bound to ``UpperCAmelCase__`` while later lines read the intended
    distinct names (``processor``, ``self.tmpdirname``, ``image_inputs``,
    ``masks`` ...), all test methods share the name ``_UpperCAmelCase`` (so
    only the last survives on the class), and ``np.uinta`` is a mangled
    ``np.uint8``. Restore distinct names before relying on these tests.
    """
    def _UpperCAmelCase ( self : Any ):
        """setUp: create a temp dir and save a fresh SamProcessor into it."""
        UpperCAmelCase__ = tempfile.mkdtemp()
        UpperCAmelCase__ = SamImageProcessor()
        UpperCAmelCase__ = SamProcessor(_lowercase )
        processor.save_pretrained(self.tmpdirname )
    def _UpperCAmelCase ( self : Optional[int] , **_lowercase : str ):
        """Reload the saved processor's image processor, forwarding kwargs."""
        return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase ).image_processor
    def _UpperCAmelCase ( self : Optional[int] ):
        """tearDown: remove the temp dir."""
        shutil.rmtree(self.tmpdirname )
    def _UpperCAmelCase ( self : List[str] ):
        """Build one random 30x400 RGB PIL image as test input."""
        UpperCAmelCase__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        UpperCAmelCase__ = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def _UpperCAmelCase ( self : Dict ):
        """Save/load round-trip with overridden image-processor kwargs."""
        UpperCAmelCase__ = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        UpperCAmelCase__ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0 )
        UpperCAmelCase__ = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_lowercase , padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , _lowercase )
    def _UpperCAmelCase ( self : List[str] ):
        """Processor output must equal the bare image processor's output."""
        UpperCAmelCase__ = self.get_image_processor()
        UpperCAmelCase__ = SamProcessor(image_processor=_lowercase )
        UpperCAmelCase__ = self.prepare_image_inputs()
        UpperCAmelCase__ = image_processor(_lowercase , return_tensors="np" )
        UpperCAmelCase__ = processor(images=_lowercase , return_tensors="np" )
        input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes" ) # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    @require_torch
    def _UpperCAmelCase ( self : List[str] ):
        """post_process_masks upscales to the original size and accepts
        list, torch.Tensor and np.ndarray size arguments; malformed sizes
        must raise."""
        UpperCAmelCase__ = self.get_image_processor()
        UpperCAmelCase__ = SamProcessor(image_processor=_lowercase )
        UpperCAmelCase__ = [torch.ones((1, 3, 5, 5) )]
        UpperCAmelCase__ = [[17_64, 26_46]]
        UpperCAmelCase__ = [[6_83, 10_24]]
        UpperCAmelCase__ = processor.post_process_masks(_lowercase , _lowercase , _lowercase )
        self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
        UpperCAmelCase__ = processor.post_process_masks(
            _lowercase , torch.tensor(_lowercase ) , torch.tensor(_lowercase ) )
        self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
        # should also work with np
        UpperCAmelCase__ = [np.ones((1, 3, 5, 5) )]
        UpperCAmelCase__ = processor.post_process_masks(_lowercase , np.array(_lowercase ) , np.array(_lowercase ) )
        self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
        UpperCAmelCase__ = [[1, 0], [0, 1]]
        with self.assertRaises(_lowercase ):
            UpperCAmelCase__ = processor.post_process_masks(_lowercase , np.array(_lowercase ) , np.array(_lowercase ) )
@require_vision
@require_tf
class lowercase__ ( unittest.TestCase ):
    """SamProcessor tests with the TensorFlow backend.

    NOTE(review): identifiers were mechanically collapsed — every local is
    bound to ``UpperCAmelCase__`` while later lines read the intended
    distinct names, all test methods share the name ``_UpperCAmelCase``,
    and ``np.uinta`` is a mangled ``np.uint8``. Restore distinct names
    before relying on these tests.
    """
    def _UpperCAmelCase ( self : Optional[int] ):
        """setUp: create a temp dir and save a fresh SamProcessor into it."""
        UpperCAmelCase__ = tempfile.mkdtemp()
        UpperCAmelCase__ = SamImageProcessor()
        UpperCAmelCase__ = SamProcessor(_lowercase )
        processor.save_pretrained(self.tmpdirname )
    def _UpperCAmelCase ( self : Union[str, Any] , **_lowercase : int ):
        """Reload the saved processor's image processor, forwarding kwargs."""
        return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase ).image_processor
    def _UpperCAmelCase ( self : Dict ):
        """tearDown: remove the temp dir."""
        shutil.rmtree(self.tmpdirname )
    def _UpperCAmelCase ( self : Optional[int] ):
        """Build one random 30x400 RGB PIL image as test input."""
        UpperCAmelCase__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        UpperCAmelCase__ = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def _UpperCAmelCase ( self : Optional[Any] ):
        """Save/load round-trip with overridden image-processor kwargs."""
        UpperCAmelCase__ = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        UpperCAmelCase__ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0 )
        UpperCAmelCase__ = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_lowercase , padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , _lowercase )
    def _UpperCAmelCase ( self : Any ):
        """Processor output must equal the bare image processor's output."""
        UpperCAmelCase__ = self.get_image_processor()
        UpperCAmelCase__ = SamProcessor(image_processor=_lowercase )
        UpperCAmelCase__ = self.prepare_image_inputs()
        UpperCAmelCase__ = image_processor(_lowercase , return_tensors="np" )
        UpperCAmelCase__ = processor(images=_lowercase , return_tensors="np" )
        input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes" ) # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    @require_tf
    def _UpperCAmelCase ( self : Any ):
        """post_process_masks with return_tensors='tf' upscales masks and
        accepts list, tf tensor and np.ndarray sizes; malformed sizes must
        raise InvalidArgumentError."""
        UpperCAmelCase__ = self.get_image_processor()
        UpperCAmelCase__ = SamProcessor(image_processor=_lowercase )
        UpperCAmelCase__ = [tf.ones((1, 3, 5, 5) )]
        UpperCAmelCase__ = [[17_64, 26_46]]
        UpperCAmelCase__ = [[6_83, 10_24]]
        UpperCAmelCase__ = processor.post_process_masks(_lowercase , _lowercase , _lowercase , return_tensors="tf" )
        self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
        UpperCAmelCase__ = processor.post_process_masks(
            _lowercase , tf.convert_to_tensor(_lowercase ) , tf.convert_to_tensor(_lowercase ) , return_tensors="tf" , )
        self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
        # should also work with np
        UpperCAmelCase__ = [np.ones((1, 3, 5, 5) )]
        UpperCAmelCase__ = processor.post_process_masks(
            _lowercase , np.array(_lowercase ) , np.array(_lowercase ) , return_tensors="tf" )
        self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
        UpperCAmelCase__ = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError ):
            UpperCAmelCase__ = processor.post_process_masks(
                _lowercase , np.array(_lowercase ) , np.array(_lowercase ) , return_tensors="tf" )
@require_vision
@require_torchvision
class lowercase__ ( unittest.TestCase ):
    """PyTorch/TensorFlow cross-framework equivalence tests for SamProcessor.

    NOTE(review): identifiers were mechanically collapsed — every local is
    bound to ``UpperCAmelCase__`` while later lines read the intended
    distinct names (``tf_masks``, ``pt_masks`` ...), all helper methods
    share the name ``_UpperCAmelCase``, ``np.uinta`` is a mangled
    ``np.uint8`` and ``np.floataa`` a mangled ``np.float32``. Restore
    distinct names before relying on these tests.
    """
    def _UpperCAmelCase ( self : Union[str, Any] ):
        """setUp: create a temp dir and save a fresh SamProcessor into it."""
        UpperCAmelCase__ = tempfile.mkdtemp()
        UpperCAmelCase__ = SamImageProcessor()
        UpperCAmelCase__ = SamProcessor(_lowercase )
        processor.save_pretrained(self.tmpdirname )
    def _UpperCAmelCase ( self : str , **_lowercase : Optional[int] ):
        """Reload the saved processor's image processor, forwarding kwargs."""
        return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase ).image_processor
    def _UpperCAmelCase ( self : str ):
        """tearDown: remove the temp dir."""
        shutil.rmtree(self.tmpdirname )
    def _UpperCAmelCase ( self : str ):
        """Build one random 30x400 RGB PIL image as test input."""
        UpperCAmelCase__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        UpperCAmelCase__ = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    @is_pt_tf_cross_test
    def _UpperCAmelCase ( self : List[str] ):
        """post_process_masks must agree between the tf and pt backends."""
        UpperCAmelCase__ = self.get_image_processor()
        UpperCAmelCase__ = SamProcessor(image_processor=_lowercase )
        UpperCAmelCase__ = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
        UpperCAmelCase__ = [tf.convert_to_tensor(_lowercase )]
        UpperCAmelCase__ = [torch.tensor(_lowercase )]
        UpperCAmelCase__ = [[17_64, 26_46]]
        UpperCAmelCase__ = [[6_83, 10_24]]
        UpperCAmelCase__ = processor.post_process_masks(
            _lowercase , _lowercase , _lowercase , return_tensors="tf" )
        UpperCAmelCase__ = processor.post_process_masks(
            _lowercase , _lowercase , _lowercase , return_tensors="pt" )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
    @is_pt_tf_cross_test
    def _UpperCAmelCase ( self : Optional[int] ):
        """Pixel values must be identical across processor/image-processor
        and across the pt and tf return types."""
        UpperCAmelCase__ = self.get_image_processor()
        UpperCAmelCase__ = SamProcessor(image_processor=_lowercase )
        UpperCAmelCase__ = self.prepare_image_inputs()
        UpperCAmelCase__ = image_processor(_lowercase , return_tensors="pt" )["pixel_values"].numpy()
        UpperCAmelCase__ = processor(images=_lowercase , return_tensors="pt" )["pixel_values"].numpy()
        UpperCAmelCase__ = image_processor(_lowercase , return_tensors="tf" )["pixel_values"].numpy()
        UpperCAmelCase__ = processor(images=_lowercase , return_tensors="tf" )["pixel_values"].numpy()
        self.assertTrue(np.allclose(_lowercase , _lowercase ) )
        self.assertTrue(np.allclose(_lowercase , _lowercase ) )
        self.assertTrue(np.allclose(_lowercase , _lowercase ) )
| 475 | 0 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCamelCase__ :
    """Model tester for LayoutLMv3: builds configs/inputs and checks model outputs.

    NOTE(review): this file's identifiers were mechanically collapsed. The
    ``__init__`` (and the per-head check methods) repeat the parameter name
    ``__A`` — duplicate parameter names are a SyntaxError — and method
    bodies bind every value to ``_lowercase`` while later lines read the
    intended distinct names (``bbox``, ``config_and_inputs``,
    ``input_ids`` ...). The structure below documents intent only; the
    distinct names must be restored before this can execute.
    """
    def __init__( self : Any , __A : Any , __A : Tuple=2 , __A : List[str]=3 , __A : str=4 , __A : Any=2 , __A : str=7 , __A : Optional[Any]=True , __A : int=True , __A : str=True , __A : Optional[int]=True , __A : str=9_9 , __A : List[Any]=3_6 , __A : Union[str, Any]=3 , __A : Any=4 , __A : Union[str, Any]=3_7 , __A : List[str]="gelu" , __A : int=0.1 , __A : str=0.1 , __A : Tuple=5_1_2 , __A : List[str]=1_6 , __A : List[str]=2 , __A : int=0.0_2 , __A : List[str]=6 , __A : int=6 , __A : List[Any]=3 , __A : Union[str, Any]=4 , __A : Optional[int]=None , __A : Tuple=1_0_0_0 , ):
        """Store tiny-model hyperparameters (positions match the attribute
        assignments below: parent, batch_size, ... , range_bbox)."""
        _lowercase = parent
        _lowercase = batch_size
        _lowercase = num_channels
        _lowercase = image_size
        _lowercase = patch_size
        _lowercase = text_seq_length
        _lowercase = is_training
        _lowercase = use_input_mask
        _lowercase = use_token_type_ids
        _lowercase = use_labels
        _lowercase = vocab_size
        _lowercase = hidden_size
        _lowercase = num_hidden_layers
        _lowercase = num_attention_heads
        _lowercase = intermediate_size
        _lowercase = hidden_act
        _lowercase = hidden_dropout_prob
        _lowercase = attention_probs_dropout_prob
        _lowercase = max_position_embeddings
        _lowercase = type_vocab_size
        _lowercase = type_sequence_label_size
        _lowercase = initializer_range
        _lowercase = coordinate_size
        _lowercase = shape_size
        _lowercase = num_labels
        _lowercase = num_choices
        _lowercase = scope
        _lowercase = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        _lowercase = text_seq_length
        _lowercase = (image_size // patch_size) ** 2 + 1
        _lowercase = self.text_seq_length + self.image_seq_length
    def snake_case ( self : List[Any] ):
        """Build (config, input_ids, bbox, pixel_values, token_type_ids,
        input_mask, sequence_labels, token_labels) for the tiny model."""
        _lowercase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        _lowercase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                # Swap coordinates so x0 <= x1 and y0 <= y1.
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    _lowercase = bbox[i, j, 3]
                    _lowercase = bbox[i, j, 1]
                    _lowercase = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    _lowercase = bbox[i, j, 2]
                    _lowercase = bbox[i, j, 0]
                    _lowercase = t
        _lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        _lowercase = None
        if self.use_input_mask:
            _lowercase = random_attention_mask([self.batch_size, self.text_seq_length] )
        _lowercase = None
        if self.use_token_type_ids:
            _lowercase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        _lowercase = None
        _lowercase = None
        if self.use_labels:
            _lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _lowercase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        _lowercase = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def snake_case ( self : List[str] , __A : str , __A : int , __A : Optional[int] , __A : Any , __A : str , __A : Union[str, Any] , __A : List[str] , __A : Dict ):
        """Check the base model in text+image, text-only and image-only modes."""
        _lowercase = LayoutLMvaModel(config=__A )
        model.to(__A )
        model.eval()
        # text + image
        _lowercase = model(__A , pixel_values=__A )
        _lowercase = model(
            __A , bbox=__A , pixel_values=__A , attention_mask=__A , token_type_ids=__A )
        _lowercase = model(__A , bbox=__A , pixel_values=__A , token_type_ids=__A )
        _lowercase = model(__A , bbox=__A , pixel_values=__A )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        _lowercase = model(__A )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        _lowercase = model(pixel_values=__A )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
    def snake_case ( self : Optional[int] , __A : Optional[Any] , __A : Dict , __A : str , __A : Dict , __A : Optional[int] , __A : int , __A : List[str] , __A : List[Any] ):
        """Check the sequence-classification head's logits shape."""
        _lowercase = self.num_labels
        _lowercase = LayoutLMvaForSequenceClassification(__A )
        model.to(__A )
        model.eval()
        _lowercase = model(
            __A , bbox=__A , pixel_values=__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def snake_case ( self : List[Any] , __A : List[Any] , __A : str , __A : Any , __A : str , __A : Tuple , __A : int , __A : List[Any] , __A : int ):
        """Check the token-classification head's logits shape."""
        _lowercase = self.num_labels
        _lowercase = LayoutLMvaForTokenClassification(config=__A )
        model.to(__A )
        model.eval()
        _lowercase = model(
            __A , bbox=__A , pixel_values=__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
    def snake_case ( self : Tuple , __A : Dict , __A : Optional[Any] , __A : List[str] , __A : Tuple , __A : Optional[int] , __A : List[str] , __A : int , __A : Optional[Any] ):
        """Check the question-answering head's start/end logits shapes."""
        _lowercase = LayoutLMvaForQuestionAnswering(config=__A )
        model.to(__A )
        model.eval()
        _lowercase = model(
            __A , bbox=__A , pixel_values=__A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def snake_case ( self : List[Any] ):
        """Repackage prepare_config_and_inputs() into (config, inputs_dict)."""
        _lowercase = self.prepare_config_and_inputs()
        (
            (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) ,
        ) = config_and_inputs
        _lowercase = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class UpperCamelCase__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    """LayoutLMv3 model test-suite (common ModelTester/Pipeline mixins).

    NOTE(review): identifiers were mechanically collapsed — every class
    attribute shares the name ``UpperCAmelCase__`` (later bindings shadow
    earlier ones), all test methods share the name ``snake_case`` (only the
    last survives on the class), several signatures repeat the parameter
    name ``__A`` (a SyntaxError), and bodies read names never bound
    (``inputs_dict``, ``config_and_inputs``). Restore distinct names before
    relying on this suite.
    """
    UpperCAmelCase__ = False
    UpperCAmelCase__ = False
    UpperCAmelCase__ = False
    # All LayoutLMv3 model classes under test (empty when torch is absent).
    UpperCAmelCase__ = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model-class mapping for the pipeline mixin.
    UpperCAmelCase__ = (
        {'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    def snake_case ( self : List[Any] , __A : Optional[int] , __A : Union[str, Any] , __A : Optional[Any] , __A : str , __A : str ):
        """Pipeline mixin hook: never skip any pipeline test for this model."""
        return True
    def snake_case ( self : Optional[Any] ):
        """Create the model tester and the config tester."""
        _lowercase = LayoutLMvaModelTester(self )
        _lowercase = ConfigTester(self , config_class=__A , hidden_size=3_7 )
    def snake_case ( self : Union[str, Any] , __A : List[str] , __A : Union[str, Any] , __A : Tuple=False ):
        """Adapt a generic inputs_dict (and optional labels) to the given
        model class before a forward pass."""
        _lowercase = copy.deepcopy(__A )
        if model_class in get_values(__A ):
            # Multiple-choice models expect an extra num_choices dimension.
            _lowercase = {
                k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
                if isinstance(__A , torch.Tensor ) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(__A ):
                _lowercase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=__A )
            elif model_class in get_values(__A ):
                _lowercase = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=__A )
                _lowercase = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=__A )
            elif model_class in [
                *get_values(__A ),
            ]:
                _lowercase = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=__A )
            elif model_class in [
                *get_values(__A ),
            ]:
                _lowercase = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=__A , )
        return inputs_dict
    def snake_case ( self : int ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    def snake_case ( self : Optional[Any] ):
        """Base-model forward-pass shape test."""
        _lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__A )
    def snake_case ( self : List[str] ):
        """Base-model test across all position-embedding types."""
        _lowercase = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            _lowercase = type
            self.model_tester.create_and_check_model(*__A )
    def snake_case ( self : Union[str, Any] ):
        """Sequence-classification head test."""
        _lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*__A )
    def snake_case ( self : List[Any] ):
        """Token-classification head test."""
        _lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__A )
    def snake_case ( self : Union[str, Any] ):
        """Question-answering head test."""
        _lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*__A )
    @slow
    def snake_case ( self : List[str] ):
        """Slow test: the first published checkpoint must load."""
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowercase = LayoutLMvaModel.from_pretrained(__A )
            self.assertIsNotNone(__A )
def A__ ( ) -> "Image.Image":
    """Load the standard COCO cats fixture image used by the slow test below.

    Returns:
        The opened PIL image.

    Fixes the original, which assigned the opened image to ``_lowercase``
    but then returned the undefined name ``image`` (a NameError).
    """
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
    """Integration test: LayoutLMv3 base checkpoint forward pass on a real image.

    NOTE(review): identifiers were mechanically collapsed — both methods
    share the name ``snake_case`` (the second shadows the first),
    ``apply_ocr=__A`` reads an unbound name (presumably ``False``), and the
    slow test binds everything to ``_lowercase`` while reading ``model``,
    ``prepare_img``, ``input_ids``, ``bbox``, ``pixel_values`` and
    ``outputs``. Restore distinct names before relying on this test.
    """
    @cached_property
    def snake_case ( self : List[str] ):
        """Default image processor (OCR disabled), or None without vision deps."""
        return LayoutLMvaImageProcessor(apply_ocr=__A ) if is_vision_available() else None
    @slow
    def snake_case ( self : List[Any] ):
        """Forward the base checkpoint and compare a 3x3 slice of the last
        hidden state against pinned reference values."""
        _lowercase = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(__A )
        _lowercase = self.default_image_processor
        _lowercase = prepare_img()
        _lowercase = image_processor(images=__A , return_tensors="pt" ).pixel_values.to(__A )
        _lowercase = torch.tensor([[1, 2]] )
        _lowercase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
        # forward pass
        _lowercase = model(
            input_ids=input_ids.to(__A ) , bbox=bbox.to(__A ) , pixel_values=pixel_values.to(__A ) , )
        # verify the logits
        _lowercase = torch.Size((1, 1_9_9, 7_6_8) )
        self.assertEqual(outputs.last_hidden_state.shape , __A )
        _lowercase = torch.tensor(
            [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(__A )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __A , atol=1e-4 ) )
| 711 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
__magic_name__ : Optional[Any] = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''
__magic_name__ : Tuple = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
__magic_name__ : Any = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
    """Exact-match metric: percentage of predictions equal to their references.

    NOTE(review): both methods below share the name ``snake_case`` (the
    second shadows the first on the class); ``datasets.Metric`` normally
    expects ``_info`` and ``_compute`` — rename once callers are audited.
    """

    def snake_case ( self : Union[str, Any] ):
        """Return the MetricInfo describing the metric's inputs and card text."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string" , id="sequence" ),
                    "references": datasets.Value("string" , id="sequence" ),
                } ) , reference_urls=[] , )

    def snake_case ( self : List[Any] , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ):
        """Compute the exact-match rate between predictions and references.

        Args:
            predictions: list of predicted strings.
            references: list of reference strings (same length).
            regexes_to_ignore: regex patterns stripped from both sides first.
            ignore_case: lowercase both sides before comparing.
            ignore_punctuation: strip all punctuation before comparing.
            ignore_numbers: strip all digits before comparing.

        Returns:
            ``{"exact_match": rate}`` with rate in [0.0, 100.0].

        Fixes the original, whose signature repeated the parameter name
        ``__A`` (a SyntaxError) and whose body read names that were never
        bound (``predictions``, ``references``, the option flags).
        """
        if regexes_to_ignore is not None:
            for pattern in regexes_to_ignore:
                predictions = np.array([re.sub(pattern , "" , x ) for x in predictions] )
                references = np.array([re.sub(pattern , "" , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )
        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )
        if ignore_punctuation:
            # Delete every punctuation character in a single C-level pass.
            table = string.punctuation.maketrans("" , "" , string.punctuation )
            predictions = np.char.translate(predictions , table=table )
            references = np.char.translate(references , table=table )
        if ignore_numbers:
            table = string.digits.maketrans("" , "" , string.digits )
            predictions = np.char.translate(predictions , table=table )
            references = np.char.translate(references , table=table )
        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 1_0_0}
| 602 | 0 |
import os
from pathlib import Path
def SCREAMING_SNAKE_CASE_ ( ) -> "object":
    """JIT-compile and import the custom MultiScaleDeformableAttention op.

    Locates the deformable-DETR kernel sources under
    ``<repo>/kernels/deformable_detr`` relative to this file, builds them
    with torch's cpp-extension loader, and returns the resulting module.

    Fixes the original, which read the undefined name ``_snake_case`` for
    both the anchor path (intended: ``__file__``) and ``with_cuda``
    (intended: ``True``, matching the ``-DWITH_CUDA=1`` flag), and never
    passed the collected source list to ``load``.
    """
    from torch.utils.cpp_extension import load

    root = Path(__file__ ).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr'''
    src_files = [
        root / filename
        for filename in [
            '''vision.cpp''',
            os.path.join('''cpu''' , '''ms_deform_attn_cpu.cpp''' ),
            os.path.join('''cuda''' , '''ms_deform_attn_cuda.cu''' ),
        ]
    ]
    load(
        '''MultiScaleDeformableAttention''' , src_files , with_cuda=True , extra_include_paths=[str(root )] , extra_cflags=['''-DWITH_CUDA=1'''] , extra_cuda_cflags=[
            '''-DCUDA_HAS_FP16=1''',
            '''-D__CUDA_NO_HALF_OPERATORS__''',
            '''-D__CUDA_NO_HALF_CONVERSIONS__''',
            '''-D__CUDA_NO_HALF2_OPERATORS__''',
        ] , )
    import MultiScaleDeformableAttention as MSDA

    return MSDA
| 2 |
def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    """Recursively move ``height`` disks from ``from_pole`` to ``to_pole``
    using ``with_pole`` as the spare peg (Towers of Hanoi).

    Fixes the original, in which all three functions shared one name
    (``_lowercase``), every parameter was named ``__UpperCamelCase``
    (duplicate parameter names are a SyntaxError), and the entry guard
    called an undefined ``main``.
    """
    if height >= 1:
        # Park the top height-1 disks on the spare peg, move the largest,
        # then bring the parked stack on top of it.
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(from_pole: str, to_pole: str) -> None:
    """Print a single disk move."""
    print("""moving disk from""" , from_pole , """to""" , to_pole )


def main() -> None:
    """Prompt for a tower height and print the full solution."""
    height = int(input("""Height of hanoi: """ ).strip() )
    move_tower(height , """A""" , """B""" , """C""" )


if __name__ == "__main__":
    main()
| 214 | 0 |
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class lowerCAmelCase__(ChunkPipeline):
    """Automatic mask-generation pipeline (SAM-style).

    Works as a ``ChunkPipeline``: ``preprocess`` yields batches of point
    prompts over image crops, ``_forward`` predicts and filters masks per
    batch, and ``postprocess`` merges all batches with NMS.

    PyTorch only; requires the vision and torch backends.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, '''vision''')
        requires_backends(self, '''torch''')

        if self.framework != "pt":
            raise ValueError(F'The {self.__class__} is only available in PyTorch.')

        # Restrict to models registered for mask generation.
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        """Split user kwargs into preprocess / forward / postprocess groups."""
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs['''points_per_batch'''] = kwargs['''points_per_batch''']
        if "points_per_crop" in kwargs:
            preprocess_kwargs['''points_per_crop'''] = kwargs['''points_per_crop''']
        if "crops_n_layers" in kwargs:
            preprocess_kwargs['''crops_n_layers'''] = kwargs['''crops_n_layers''']
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs['''crop_overlap_ratio'''] = kwargs['''crop_overlap_ratio''']
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs['''crop_n_points_downscale_factor'''] = kwargs['''crop_n_points_downscale_factor''']
        # forward / postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params['''pred_iou_thresh'''] = kwargs['''pred_iou_thresh''']
        if "stability_score_offset" in kwargs:
            forward_params['''stability_score_offset'''] = kwargs['''stability_score_offset''']
        if "mask_threshold" in kwargs:
            forward_params['''mask_threshold'''] = kwargs['''mask_threshold''']
        if "stability_score_thresh" in kwargs:
            forward_params['''stability_score_thresh'''] = kwargs['''stability_score_thresh''']
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs['''crops_nms_thresh'''] = kwargs['''crops_nms_thresh''']
        if "output_rle_mask" in kwargs:
            postprocess_kwargs['''output_rle_mask'''] = kwargs['''output_rle_mask''']
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs['''output_bboxes_mask'''] = kwargs['''output_bboxes_mask''']
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        """Generate masks for ``image`` (path, URL, or PIL image)."""
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=6_4,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 5_1_2 / 1_5_0_0,
        points_per_crop: Optional[int] = 3_2,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        """Yield batched point prompts plus shared crop/embedding tensors."""
        image = load_image(image)
        target_size = self.image_processor.size['''longest_edge''']
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor)
        model_inputs = self.image_processor(images=cropped_images, return_tensors='''pt''')

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    # Compute the image embeddings once; they are shared by
                    # every point batch yielded below.
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop('''pixel_values'''))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                '''Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '''
                '''To return all points at once, set points_per_batch to None''')

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        """Run the model on one point batch and filter the predicted masks."""
        input_boxes = model_inputs.pop('''input_boxes''')
        is_last = model_inputs.pop('''is_last''')
        original_sizes = model_inputs.pop('''original_sizes''').tolist()
        reshaped_input_sizes = model_inputs.pop('''reshaped_input_sizes''').tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs['''pred_masks''']
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False)
        iou_scores = model_outputs['''iou_scores''']
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0], iou_scores[0], original_sizes[0], input_boxes[0], pred_iou_thresh, stability_score_thresh, mask_threshold, stability_score_offset, )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        """Merge every batch's masks, de-duplicate with NMS, return results."""
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop('''iou_scores'''))
            all_masks.extend(model_output.pop('''masks'''))
            all_boxes.append(model_output.pop('''boxes'''))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh)

        # Forward any remaining per-batch values untouched.
        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 700 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch(gpta_checkpoint_path, gpta_config_file, pytorch_dump_folder_path):
    """Convert a TensorFlow GPT-2 checkpoint to a PyTorch model folder.

    Args:
        gpta_checkpoint_path: path to the TensorFlow checkpoint.
        gpta_config_file: optional JSON config path; empty string uses defaults.
        pytorch_dump_folder_path: output folder for weights + config.

    The mangled definition reused one parameter name three times (a
    SyntaxError); the name now matches the call in the ``__main__`` block.
    """
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file)
    model = GPTaModel(config)

    # Load weights from numpy
    load_tf_weights_in_gpta(model, config, gpta_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
    print(F'Save PyTorch model to {pytorch_weights_dump_path}')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(F'Save configuration file to {pytorch_config_dump_path}')
    with open(pytorch_config_dump_path, '''w''', encoding='''utf-8''') as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--gpt2_config_file',
        default='',
        type=str,
        help=(
            'An optional config json file corresponding to the pre-trained OpenAI model. \n'
            'This specifies the model architecture.'
        ),
    )
    args = parser.parse_args()
    # NOTE: argparse stores these under the CLI spellings (gpt2_*); the
    # previous code read non-existent args.gpta_* attributes.
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 340 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase : str = logging.get_logger(__name__)
# Vocabulary file layout and hosted resources for the GPT-2 tokenizer.
# The mangled code assigned every constant to the same name, clobbering all
# but the last; the class below references these names.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
    },
    "merges_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
    },
}

# Maximum input lengths (positional embedding sizes) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt2": 1024,
    "gpt2-medium": 1024,
    "gpt2-large": 1024,
    "gpt2-xl": 1024,
    "distilgpt2": 1024,
}
class SCREAMING_SNAKE_CASE__(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) GPT-2 byte-level BPE tokenizer.

    Restored from the mangled version whose class attributes all shared one
    name and whose four methods all clobbered each other under one name,
    with duplicated parameter names (a SyntaxError) in ``__init__`` and
    ``save_vocabulary``. Attribute and method names follow the
    ``PreTrainedTokenizerFast`` contract so the base class picks them up.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPTaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        """Build the tokenizer; ``add_prefix_space`` controls whether a
        leading space is added so the first word is encoded like any other."""
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        # Re-instantiate the backend pre-tokenizer if its serialized
        # add_prefix_space disagrees with the requested value.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend model files into ``save_directory``."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a Conversation into ids, appending EOS after every turn and
        keeping only the trailing ``model_max_length`` tokens."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def UpperCAmelCase_(repo_id: str, path: str, revision: Optional[str] = None):
    """Return the hf.co URL of ``path`` inside dataset repo ``repo_id``.

    The mangled signature reused one parameter name three times (a
    SyntaxError); restored as (repo_id, path, revision) to match the
    ``hfh.hf_hub_url`` call below.
    """
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
# Checkpoint -> hosted config URL. Previously this clobbered the logger
# because both were assigned to the same placeholder name.
UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class _SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration for UMT5 models.

    Restored from a mangled version whose ``__init__`` reused one parameter
    name for every argument (a SyntaxError) and whose base class name was
    undefined; parameter names and defaults are recovered from the
    assignments in the original body.
    """

    model_type = '''umt5'''
    keys_to_ignore_at_inference = ['''past_key_values''']

    def __init__(
        self,
        vocab_size=2_5_0_1_1_2,
        d_model=5_1_2,
        d_kv=6_4,
        d_ff=1_0_2_4,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=3_2,
        relative_attention_max_distance=1_2_8,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder, tokenizer_class=tokenizer_class, tie_word_embeddings=tie_word_embeddings, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        # Parse e.g. "gated-gelu" into (is_gated, activation name).
        act_info = self.feed_forward_proj.split('''-''')
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == '''gated'''

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
                '''\'gated-gelu\' or \'relu\'''')

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = '''gelu_new'''

    @property
    def hidden_size(self):
        # Alias expected by the generic configuration API.
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class UMTaOnnxConfig(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for UMT5 (same input layout as T5).

    Renamed: the mangled version reused the config class's name, so this
    definition silently clobbered it.
    """

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            '''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
            '''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
        }
        if self.use_past:
            # With cached keys/values only the new decoder token is fed in.
            common_inputs['''attention_mask'''][1] = '''past_encoder_sequence + sequence'''
            common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
            common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
        else:
            common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
            common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''decoder_sequence'''}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='''inputs''')
        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 1_3

    @property
    def atol_for_validation(self) -> float:
        # Tolerance used when validating the exported graph against PyTorch.
        return 5e-4
| 718 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import wiring for the Albert model package. The mangled version
# assigned every structure to one placeholder name (destroying
# `_import_structure`) and bound the `_LazyModule` to a variable instead of
# installing it into `sys.modules`, which disabled lazy loading entirely.
_import_structure = {
    'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_albert'] = ['AlbertTokenizer']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_albert_fast'] = ['AlbertTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_albert'] = [
        'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'AlbertForMaskedLM',
        'AlbertForMultipleChoice',
        'AlbertForPreTraining',
        'AlbertForQuestionAnswering',
        'AlbertForSequenceClassification',
        'AlbertForTokenClassification',
        'AlbertModel',
        'AlbertPreTrainedModel',
        'load_tf_weights_in_albert',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_albert'] = [
        'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFAlbertForMaskedLM',
        'TFAlbertForMultipleChoice',
        'TFAlbertForPreTraining',
        'TFAlbertForQuestionAnswering',
        'TFAlbertForSequenceClassification',
        'TFAlbertForTokenClassification',
        'TFAlbertMainLayer',
        'TFAlbertModel',
        'TFAlbertPreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_albert'] = [
        'FlaxAlbertForMaskedLM',
        'FlaxAlbertForMultipleChoice',
        'FlaxAlbertForPreTraining',
        'FlaxAlbertForQuestionAnswering',
        'FlaxAlbertForSequenceClassification',
        'FlaxAlbertForTokenClassification',
        'FlaxAlbertModel',
        'FlaxAlbertPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_albert import AlbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_albert_fast import AlbertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_albert import (
            ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForPreTraining,
            AlbertForQuestionAnswering,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertModel,
            AlbertPreTrainedModel,
            load_tf_weights_in_albert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_albert import (
            TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAlbertForMaskedLM,
            TFAlbertForMultipleChoice,
            TFAlbertForPreTraining,
            TFAlbertForQuestionAnswering,
            TFAlbertForSequenceClassification,
            TFAlbertForTokenClassification,
            TFAlbertMainLayer,
            TFAlbertModel,
            TFAlbertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_albert import (
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForPreTraining,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertModel,
            FlaxAlbertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 142 | 0 |
def lowerCamelCase_(input_str: str) -> bool:
    """Return True if every character of ``input_str`` is distinct.

    Uses an integer as an unbounded bitmap keyed by the character's code
    point. The mangled version named the parameter differently from the
    ``input_str`` the body iterated, raising NameError on any call.
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
return True
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 323 |
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    """ConfigTester that additionally checks CvT-specific config attributes.

    The mangled version inherited from an undefined name and shared its
    class name with the other test classes, clobbering them.
    """

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        # CvT configs are staged: these attributes must exist per stage.
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class CvtModelTester:
    """Builds a tiny CvtConfig plus dummy inputs and runs shape checks.

    Renamed from the clobbered placeholder so that ``CvtModelTester(self)``
    in the common test class resolves. Parameter names are recovered from
    the attribute assignments in the original ``__init__`` body, which
    reused a single duplicated parameter name (a SyntaxError).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        # NOTE: mutable list defaults are read-only fixtures here.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for one forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check the backbone output shape after the staged downsampling."""
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        # Each stage applies a strided, padded patch convolution.
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for CvT.

    CvT outputs no attentions and has no input/output embeddings, so the
    corresponding common tests are skipped. Flag attributes are restored
    from the mangled version in which they all shared one name.
    """

    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Intentionally a no-op: CvT configs are validated elsewhere.
        return

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]), [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by integration tests.

    Renamed from a placeholder so the ``prepare_img()`` call in the
    integration test resolves.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check against a pretrained CvT checkpoint."""

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits against reference values for the fixture image
        expected_shape = torch.Size((1, 10_00))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.92_85, 0.90_15, -0.31_50]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 323 | 1 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
# Checkpoint conversion only copies weights; gradients are never needed.
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    """Build (old_key, new_key) pairs mapping ViT-MSN names to HF ViT names.

    Args:
        config: object exposing ``num_hidden_layers``.
        base_model: when True, emit keys for a bare ``ViTModel`` (strip the
            leading ``vit.`` and keep layernorm instead of the classifier).

    The mangled signature reused one parameter name twice (a SyntaxError);
    the name matches the helper's intended public name in the converter.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ('''module.cls_token''', '''vit.embeddings.cls_token'''),
            ('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
            ('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
            ('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
        ])

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ('''module.norm.weight''', '''layernorm.weight'''),
                ('''module.norm.bias''', '''layernorm.bias'''),
            ])

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ('''norm.weight''', '''vit.layernorm.weight'''),
                ('''norm.bias''', '''vit.layernorm.bias'''),
                ('''head.weight''', '''classifier.weight'''),
                ('''head.bias''', '''classifier.bias'''),
            ])

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused qkv projection in `state_dict` into separate HF query/key/value entries.

    Args:
        state_dict (dict): checkpoint state dict, modified in place.
        config: model config providing ``num_hidden_layers`` and ``hidden_size``.
        base_model (bool): if True, keys are written without the leading ``"vit."`` prefix.
    """
    for i in range(config.num_hidden_layers):
        prefix = "" if base_model else "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict;
        # the fused matrix is stacked as [query; key; value] along dim 0
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the classification head weights from `state_dict` in place.

    The MSN checkpoint's head is not used by the base model, so its keys are
    removed (silently, if already absent) before loading.
    """
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        # pop with a default so missing keys are not an error
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    """Drop the MSN projection-head (``module.fc.*``) weights from `state_dict` in place.

    The projection head is only used during self-supervised pre-training; the
    HF ViT model does not contain it, so these keys are removed (silently, if
    already absent) before loading.
    """
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        # pop with a default so missing keys are not an error
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move the value stored under `old` to the key `new` in `dct`, in place."""
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Download a ViT-MSN checkpoint, convert it to the HF format and save it.

    Args:
        checkpoint_url (str): URL of the original MSN ``.pth.tar`` checkpoint; the
            variant ("s16", "l16", "b4", "l7") is inferred from the URL.
        pytorch_dump_folder_path (str): output directory for the converted model
            and image processor.

    Raises:
        AssertionError: if the converted model's outputs do not match the
            pre-computed reference slice for the variant.
    """
    config = ViTMSNConfig()
    config.num_labels = 1000

    # ImageNet-1k label mapping shipped on the Hub
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}

    # architecture hyper-parameters per checkpoint variant (defaults = ViT-B/16)
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    # MSN checkpoints store the relevant weights under "target_encoder"
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    # sanity-check the conversion on a standard COCO image
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    # NOTE(review): upstream normalizes with IMAGENET_DEFAULT_MEAN/STD here —
    # confirm these constants are imported at the top of the file.
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Command-line entry point: parse the checkpoint URL and output folder,
    # then run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
"""simple docstring"""
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def convert_tax_checkpoint_to_flax(tax_checkpoint_path, config_name, flax_dump_folder_path):
    """Convert a T5X checkpoint into a Flax T5/LongT5 model and save it.

    Args:
        tax_checkpoint_path (str): path to the T5X checkpoint directory.
        config_name (str): HF config name of the target T5/LongT5 model.
        flax_dump_folder_path (str): output directory for the converted Flax model.

    Raises:
        ValueError: if the config is neither ``t5`` nor a supported ``longt5``
            encoder attention type.
    """
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeqaSeqLM.from_config(config=config)
    tax_model = checkpoints.load_tax_checkpoint(tax_checkpoint_path)

    # v1.1 / LongT5 checkpoints split the MLP input projection into wi_0/wi_1
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]

    # pick the Flax module name of the encoder self-attention layer
    # (bugfix: the second test must be `elif`, otherwise plain t5 configs
    # fell through to the `else` branch and raised)
    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global]."
        )

    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        tax_attention_key = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        tax_attention_out = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        tax_attention_query = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        tax_attention_value = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            tax_global_layer_norm = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        tax_attention_layer_norm = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            tax_mlp_wi_1 = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            tax_mlp_wi = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        tax_mlp_wo = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        tax_mlp_layer_norm = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = tax_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = tax_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = tax_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = tax_attention_value

        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = tax_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"][
                "weight"
            ] = tax_global_layer_norm

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = tax_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = tax_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = tax_mlp_wi

        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = tax_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = tax_mlp_layer_norm

        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block

    # Only for layer 0: the relative position bias is shared across layers
    tax_encoder_rel_embedding = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
        "embedding"
    ] = tax_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        tax_encoder_global_rel_embedding = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["global_relative_attention_bias"][
            "embedding"
        ] = tax_encoder_global_rel_embedding

    # Final encoder layer norm
    tax_encoder_norm = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = tax_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        tax_attention_key = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        tax_attention_out = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        tax_attention_query = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        tax_attention_value = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        tax_pre_attention_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        tax_enc_dec_attention_module = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        tax_enc_dec_attention_key = tax_enc_dec_attention_module["key"]["kernel"]
        tax_enc_dec_attention_out = tax_enc_dec_attention_module["out"]["kernel"]
        tax_enc_dec_attention_query = tax_enc_dec_attention_module["query"]["kernel"]
        tax_enc_dec_attention_value = tax_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        tax_cross_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            tax_mlp_wi_1 = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            tax_mlp_wi = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        tax_mlp_wo = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        txa_mlp_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = tax_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = tax_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = tax_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = tax_attention_value

        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = tax_pre_attention_layer_norm

        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = tax_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = tax_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = tax_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = tax_enc_dec_attention_value

        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = tax_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = tax_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = tax_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = tax_mlp_wi

        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = tax_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = txa_mlp_layer_norm

        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block

    # Decoder Normalization
    txa_decoder_norm = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = txa_decoder_norm

    # Only for layer 0: shared relative position bias
    tax_decoder_rel_embedding = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
        "embedding"
    ] = tax_decoder_rel_embedding

    # Token Embeddings
    txa_token_embeddings = tax_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = txa_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was sucessfully converted!")
if __name__ == "__main__":
    # Command-line entry point for the T5X -> Flax conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    # argparse stores "--t5x_checkpoint_path" under the attribute t5x_checkpoint_path
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.