code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Tuple = logging.get_logger(__name__)
A_ : List[str] = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class A_ ( lowercase__ ):
'''simple docstring'''
a__ = "segformer"
def __init__(self , lowercase__=3 , lowercase__=4 , lowercase__=[2, 2, 2, 2] , lowercase__=[8, 4, 2, 1] , lowercase__=[32, 64, 160, 256] , lowercase__=[7, 3, 3, 3] , lowercase__=[4, 2, 2, 2] , lowercase__=[1, 2, 5, 8] , lowercase__=[4, 4, 4, 4] , lowercase__="gelu" , lowercase__=0.0 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.02 , lowercase__=0.1 , lowercase__=1E-6 , lowercase__=256 , lowercase__=255 , **lowercase__ , ) -> int:
super().__init__(**SCREAMING_SNAKE_CASE_ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'''Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'''
''' removed, as the behaviour will default to that of reshape_last_stage = True.''' , SCREAMING_SNAKE_CASE_ , )
__UpperCAmelCase = num_channels
__UpperCAmelCase = num_encoder_blocks
__UpperCAmelCase = depths
__UpperCAmelCase = sr_ratios
__UpperCAmelCase = hidden_sizes
__UpperCAmelCase = patch_sizes
__UpperCAmelCase = strides
__UpperCAmelCase = mlp_ratios
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = hidden_act
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = classifier_dropout_prob
__UpperCAmelCase = initializer_range
__UpperCAmelCase = drop_path_rate
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = decoder_hidden_size
__UpperCAmelCase = kwargs.get('''reshape_last_stage''' , SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase = semantic_loss_ignore_index
class A_ ( lowercase__ ):
'''simple docstring'''
a__ = version.parse("1.11" )
@property
def lowerCAmelCase_ (self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCAmelCase_ (self ) -> float:
return 1E-4
@property
def lowerCAmelCase_ (self ) -> int:
return 12
| 303 |
"""simple docstring"""
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = DownBlockaD # noqa F405
lowercase = '''down'''
def UpperCAmelCase (self : int ) -> List[str]:
"""simple docstring"""
lowerCAmelCase = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = ResnetDownsampleBlockaD # noqa F405
lowercase = '''down'''
def UpperCAmelCase (self : Any ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = AttnDownBlockaD # noqa F405
lowercase = '''down'''
def UpperCAmelCase (self : Tuple ) -> List[str]:
"""simple docstring"""
lowerCAmelCase = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = CrossAttnDownBlockaD # noqa F405
lowercase = '''down'''
def UpperCAmelCase (self : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = super().prepare_init_args_and_inputs_for_common()
lowerCAmelCase = 32
return init_dict, inputs_dict
def UpperCAmelCase (self : List[Any] ) -> str:
"""simple docstring"""
lowerCAmelCase = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = SimpleCrossAttnDownBlockaD # noqa F405
lowercase = '''down'''
@property
def UpperCAmelCase (self : Dict ) -> List[str]:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : List[Any] ) -> List[str]:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = super().prepare_init_args_and_inputs_for_common()
lowerCAmelCase = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == '''mps''' ,'''MPS result is not consistent''' )
def UpperCAmelCase (self : str ) -> Tuple:
"""simple docstring"""
lowerCAmelCase = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = SkipDownBlockaD # noqa F405
lowercase = '''down'''
@property
def UpperCAmelCase (self : List[Any] ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : Optional[int] ) -> int:
"""simple docstring"""
lowerCAmelCase = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = AttnSkipDownBlockaD # noqa F405
lowercase = '''down'''
@property
def UpperCAmelCase (self : List[Any] ) -> Optional[int]:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : Any ) -> List[str]:
"""simple docstring"""
lowerCAmelCase = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = DownEncoderBlockaD # noqa F405
lowercase = '''down'''
@property
def UpperCAmelCase (self : int ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_temb=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : int ) -> Tuple:
"""simple docstring"""
lowerCAmelCase = {
'''in_channels''': 32,
'''out_channels''': 32,
}
lowerCAmelCase = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase (self : str ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = AttnDownEncoderBlockaD # noqa F405
lowercase = '''down'''
@property
def UpperCAmelCase (self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return super().get_dummy_input(include_temb=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase = {
'''in_channels''': 32,
'''out_channels''': 32,
}
lowerCAmelCase = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase (self : Any ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = UNetMidBlockaD # noqa F405
lowercase = '''mid'''
def UpperCAmelCase (self : List[Any] ) -> str:
"""simple docstring"""
lowerCAmelCase = {
'''in_channels''': 32,
'''temb_channels''': 128,
}
lowerCAmelCase = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase (self : Optional[Any] ) -> str:
"""simple docstring"""
lowerCAmelCase = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = UNetMidBlockaDCrossAttn # noqa F405
lowercase = '''mid'''
def UpperCAmelCase (self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = super().prepare_init_args_and_inputs_for_common()
lowerCAmelCase = 32
return init_dict, inputs_dict
def UpperCAmelCase (self : Any ) -> Dict:
"""simple docstring"""
lowerCAmelCase = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = UNetMidBlockaDSimpleCrossAttn # noqa F405
lowercase = '''mid'''
@property
def UpperCAmelCase (self : Union[str, Any] ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : int ) -> int:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = super().prepare_init_args_and_inputs_for_common()
lowerCAmelCase = 32
return init_dict, inputs_dict
def UpperCAmelCase (self : str ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = UpBlockaD # noqa F405
lowercase = '''up'''
@property
def UpperCAmelCase (self : int ) -> Any:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : Tuple ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = ResnetUpsampleBlockaD # noqa F405
lowercase = '''up'''
@property
def UpperCAmelCase (self : Optional[int] ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : Dict ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = CrossAttnUpBlockaD # noqa F405
lowercase = '''up'''
@property
def UpperCAmelCase (self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : Tuple ) -> List[str]:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = super().prepare_init_args_and_inputs_for_common()
lowerCAmelCase = 32
return init_dict, inputs_dict
def UpperCAmelCase (self : str ) -> Tuple:
"""simple docstring"""
lowerCAmelCase = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = SimpleCrossAttnUpBlockaD # noqa F405
lowercase = '''up'''
@property
def UpperCAmelCase (self : Any ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ ,include_encoder_hidden_states=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : Union[str, Any] ) -> int:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = super().prepare_init_args_and_inputs_for_common()
lowerCAmelCase = 32
return init_dict, inputs_dict
def UpperCAmelCase (self : Tuple ) -> Any:
"""simple docstring"""
lowerCAmelCase = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = AttnUpBlockaD # noqa F405
lowercase = '''up'''
@property
def UpperCAmelCase (self : List[str] ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ )
@unittest.skipIf(torch_device == '''mps''' ,'''MPS result is not consistent''' )
def UpperCAmelCase (self : int ) -> Tuple:
"""simple docstring"""
lowerCAmelCase = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = SkipUpBlockaD # noqa F405
lowercase = '''up'''
@property
def UpperCAmelCase (self : Optional[int] ) -> List[str]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : str ) -> int:
"""simple docstring"""
lowerCAmelCase = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = AttnSkipUpBlockaD # noqa F405
lowercase = '''up'''
@property
def UpperCAmelCase (self : Optional[Any] ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : Any ) -> int:
"""simple docstring"""
lowerCAmelCase = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = UpDecoderBlockaD # noqa F405
lowercase = '''up'''
@property
def UpperCAmelCase (self : List[str] ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_temb=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase = {'''in_channels''': 32, '''out_channels''': 32}
lowerCAmelCase = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase (self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37]
super().test_output(SCREAMING_SNAKE_CASE_ )
class lowercase ( lowercase__ ,unittest.TestCase ):
lowercase = AttnUpDecoderBlockaD # noqa F405
lowercase = '''up'''
@property
def UpperCAmelCase (self : Any ) -> Dict:
"""simple docstring"""
return super().get_dummy_input(include_temb=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : Any ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase = {'''in_channels''': 32, '''out_channels''': 32}
lowerCAmelCase = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase (self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68]
super().test_output(SCREAMING_SNAKE_CASE_ )
| 535 | 0 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def _lowercase ( lowerCamelCase__ ) -> bytes:
"""simple docstring"""
if len(lowerCamelCase__ ) != 32:
raise ValueError("Input must be of length 32" )
__UpperCAmelCase : Union[str, Any] = B""""""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def _lowercase ( lowerCamelCase__ ) -> bytes:
"""simple docstring"""
if i < 0:
raise ValueError("Input must be non-negative" )
__UpperCAmelCase : Optional[Any] = format(lowerCamelCase__ , "08x" )[-8:]
__UpperCAmelCase : Optional[Any] = B""""""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
return little_endian_hex
def _lowercase ( lowerCamelCase__ ) -> bytes:
"""simple docstring"""
__UpperCAmelCase : int = B""""""
for char in message:
bit_string += format(lowerCamelCase__ , "08b" ).encode("utf-8" )
__UpperCAmelCase : Optional[Any] = format(len(lowerCamelCase__ ) , "064b" ).encode("utf-8" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(lowerCamelCase__ ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def _lowercase ( lowerCamelCase__ ) -> Generator[list[int], None, None]:
"""simple docstring"""
if len(lowerCamelCase__ ) % 512 != 0:
raise ValueError("Input must have length that's a multiple of 512" )
for pos in range(0 , len(lowerCamelCase__ ) , 512 ):
__UpperCAmelCase : List[str] = bit_string[pos : pos + 512]
__UpperCAmelCase : Tuple = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def _lowercase ( lowerCamelCase__ ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError("Input must be non-negative" )
__UpperCAmelCase : List[str] = format(lowerCamelCase__ , "032b" )
__UpperCAmelCase : int = """"""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(lowerCamelCase__ , 2 )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
"""simple docstring"""
return (a + b) % 2**32
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError("Input must be non-negative" )
if shift < 0:
raise ValueError("Shift must be non-negative" )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def _lowercase ( lowerCamelCase__ ) -> bytes:
"""simple docstring"""
__UpperCAmelCase : List[str] = preprocess(lowerCamelCase__ )
__UpperCAmelCase : Tuple = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
__UpperCAmelCase : Union[str, Any] = 0X6_7_4_5_2_3_0_1
__UpperCAmelCase : Tuple = 0Xe_f_c_d_a_b_8_9
__UpperCAmelCase : int = 0X9_8_b_a_d_c_f_e
__UpperCAmelCase : Dict = 0X1_0_3_2_5_4_7_6
__UpperCAmelCase : str = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(lowerCamelCase__ ):
__UpperCAmelCase : Optional[int] = aa
__UpperCAmelCase : Any = ba
__UpperCAmelCase : List[Any] = ca
__UpperCAmelCase : int = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
__UpperCAmelCase : Optional[int] = d ^ (b & (c ^ d))
__UpperCAmelCase : Optional[int] = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
__UpperCAmelCase : Dict = c ^ (d & (b ^ c))
__UpperCAmelCase : int = (5 * i + 1) % 16
elif i <= 47:
__UpperCAmelCase : Union[str, Any] = b ^ c ^ d
__UpperCAmelCase : Union[str, Any] = (3 * i + 5) % 16
else:
__UpperCAmelCase : int = c ^ (b | not_aa(lowerCamelCase__ ))
__UpperCAmelCase : Optional[Any] = (7 * i) % 16
__UpperCAmelCase : Optional[int] = (f + a + added_consts[i] + block_words[g]) % 2**32
__UpperCAmelCase : List[Any] = d
__UpperCAmelCase : int = c
__UpperCAmelCase : str = b
__UpperCAmelCase : str = sum_aa(lowerCamelCase__ , left_rotate_aa(lowerCamelCase__ , shift_amounts[i] ) )
# Add hashed chunk to running total
__UpperCAmelCase : Dict = sum_aa(lowerCamelCase__ , lowerCamelCase__ )
__UpperCAmelCase : List[Any] = sum_aa(lowerCamelCase__ , lowerCamelCase__ )
__UpperCAmelCase : int = sum_aa(lowerCamelCase__ , lowerCamelCase__ )
__UpperCAmelCase : Dict = sum_aa(lowerCamelCase__ , lowerCamelCase__ )
__UpperCAmelCase : Union[str, Any] = reformat_hex(lowerCamelCase__ ) + reformat_hex(lowerCamelCase__ ) + reformat_hex(lowerCamelCase__ ) + reformat_hex(lowerCamelCase__ )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707 | '''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_a : Optional[Any] = logging.get_logger(__name__)
_a : int = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
_a : Tuple = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
_a : List[Any] = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
@lru_cache()
def _lowercase ( ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase : Dict = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
__UpperCAmelCase : Optional[Any] = bs[:]
__UpperCAmelCase : Optional[int] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowerCamelCase__ )
cs.append(2**8 + n )
n += 1
__UpperCAmelCase : Dict = [chr(lowerCamelCase__ ) for n in cs]
return dict(zip(lowerCamelCase__ , lowerCamelCase__ ) )
def _lowercase ( lowerCamelCase__ ) -> str:
"""simple docstring"""
__UpperCAmelCase : Dict = set()
__UpperCAmelCase : Union[str, Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__UpperCAmelCase : Optional[Any] = char
return pairs
class __A (__magic_name__ ):
snake_case :Optional[int] = VOCAB_FILES_NAMES
snake_case :List[Any] = PRETRAINED_VOCAB_FILES_MAP
snake_case :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case :Optional[int] = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="replace" , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_=False , **UpperCamelCase_ , ):
__UpperCAmelCase : str = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token
__UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token
__UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token
__UpperCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token
__UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token
__UpperCAmelCase : Dict = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
super().__init__(
errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
with open(UpperCamelCase_ , encoding="utf-8" ) as vocab_handle:
__UpperCAmelCase : int = json.load(UpperCamelCase_ )
__UpperCAmelCase : Any = {v: k for k, v in self.encoder.items()}
__UpperCAmelCase : Any = errors # how to handle errors in decoding
__UpperCAmelCase : str = bytes_to_unicode()
__UpperCAmelCase : List[str] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase_ , encoding="utf-8" ) as merges_handle:
__UpperCAmelCase : str = merges_handle.read().split("\n" )[1:-1]
__UpperCAmelCase : List[str] = [tuple(merge.split() ) for merge in bpe_merges]
__UpperCAmelCase : Union[str, Any] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__UpperCAmelCase : Optional[int] = {}
__UpperCAmelCase : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__UpperCAmelCase : Dict = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def _snake_case ( self ):
return len(self.encoder )
def _snake_case ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self , UpperCamelCase_ ):
if token in self.cache:
return self.cache[token]
__UpperCAmelCase : List[str] = tuple(UpperCamelCase_ )
__UpperCAmelCase : str = get_pairs(UpperCamelCase_ )
if not pairs:
return token
while True:
__UpperCAmelCase : str = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCAmelCase , __UpperCAmelCase : List[Any] = bigram
__UpperCAmelCase : Any = []
__UpperCAmelCase : List[str] = 0
while i < len(UpperCamelCase_ ):
try:
__UpperCAmelCase : Union[str, Any] = word.index(UpperCamelCase_ , UpperCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCAmelCase : str = j
if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCAmelCase : Dict = tuple(UpperCamelCase_ )
__UpperCAmelCase : str = new_word
if len(UpperCamelCase_ ) == 1:
break
else:
__UpperCAmelCase : int = get_pairs(UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = " ".join(UpperCamelCase_ )
__UpperCAmelCase : Dict = word
return word
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Optional[Any] = []
for token in re.findall(self.pat , UpperCamelCase_ ):
__UpperCAmelCase : Any = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase_ ).split(" " ) )
return bpe_tokens
def _snake_case ( self , UpperCamelCase_ ):
return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) )
def _snake_case ( self , UpperCamelCase_ ):
return self.decoder.get(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : List[str] = "".join(UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__UpperCAmelCase : Any = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__UpperCAmelCase : Optional[int] = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + "\n" )
__UpperCAmelCase : str = 0
with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase_ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
__UpperCAmelCase : str = token_index
writer.write(" ".join(UpperCamelCase_ ) + "\n" )
index += 1
return vocab_file, merge_file
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCAmelCase : List[Any] = [self.cls_token_id]
__UpperCAmelCase : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : int = [self.sep_token_id]
__UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=False , **UpperCamelCase_ ):
__UpperCAmelCase : List[str] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase_ ) > 0 and not text[0].isspace()):
__UpperCAmelCase : Tuple = " " + text
return (text, kwargs)
| 10 | 0 |
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
monkeypatch.setattr("""datasets.utils.deprecation_utils._emitted_deprecation_warnings""" , set() )
@pytest.fixture
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
class __lowercase :
def __init__( self : List[str] ,A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = metric_id
class __lowercase :
snake_case_ = [MetricMock(__lowerCamelCase ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]]
def __lowercase ( self : List[Any] ):
'''simple docstring'''
return self._metrics
monkeypatch.setattr("""datasets.inspect.huggingface_hub""" , HfhMock() )
@pytest.mark.parametrize(
    """func, args""" , [(load_metric, ("""metrics/mse""",)), (list_metrics, ()), (inspect_metric, ("""metrics/mse""", """tmp_path"""))] )
def lowerCAmelCase(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    """Each deprecated metric entry point must emit a FutureWarning pointing at `evaluate`."""
    # Original declared five parameters all named `__UpperCamelCase` — a
    # SyntaxError — and passed an undefined name as the warning class to
    # pytest.warns (FutureWarning upstream). NOTE(review): the two fixture
    # parameter names must match the fixture functions above, which are both
    # obfuscated to `lowerCAmelCase` in this file — confirm the real names.
    if "tmp_path" in args:
        # parametrize cannot reference the tmp_path fixture directly, so a
        # placeholder string is swapped for the real path here.
        args = tuple(arg if arg != """tmp_path""" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="""https://huggingface.co/docs/evaluate"""):
        func(*args)
| 65 |
"""simple docstring"""
# Frozen sets of call-argument names used by diffusers pipeline tests to
# declare which parameters each pipeline category accepts (full set) and
# which must be batched (short set): text-to-image, image-variation,
# text-guided image-to-image, inpainting (with/without prompt), example-guided
# inpainting, class-conditioned, unconditional, audio, and token-based
# pipelines.
# NOTE(review): every assignment below rebinds the same obfuscated name
# `UpperCamelCase__`, so at runtime only the final frozenset survives; the
# original module presumably used distinct constant names (e.g.
# TEXT_TO_IMAGE_PARAMS, TEXT_TO_IMAGE_BATCH_PARAMS, ...) — confirm before use.
UpperCamelCase__ :Tuple = frozenset(
    [
        """prompt""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
UpperCamelCase__ :List[str] = frozenset(["""prompt""", """negative_prompt"""])
UpperCamelCase__ :Tuple = frozenset([])
UpperCamelCase__ :Union[str, Any] = frozenset(["""image"""])
UpperCamelCase__ :str = frozenset(
    [
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
UpperCamelCase__ :Any = frozenset(["""image"""])
UpperCamelCase__ :Optional[int] = frozenset(
    [
        """prompt""",
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
UpperCamelCase__ :int = frozenset(["""prompt""", """image""", """negative_prompt"""])
UpperCamelCase__ :Tuple = frozenset(
    [
        # Text guided image variation with an image mask
        """prompt""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
UpperCamelCase__ :Any = frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
UpperCamelCase__ :Dict = frozenset(
    [
        # image variation with an image mask
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
UpperCamelCase__ :Dict = frozenset(["""image""", """mask_image"""])
UpperCamelCase__ :Tuple = frozenset(
    [
        """example_image""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
UpperCamelCase__ :List[str] = frozenset(["""example_image""", """image""", """mask_image"""])
UpperCamelCase__ :List[str] = frozenset(["""class_labels"""])
UpperCamelCase__ :Union[str, Any] = frozenset(["""class_labels"""])
UpperCamelCase__ :Any = frozenset(["""batch_size"""])
UpperCamelCase__ :Optional[Any] = frozenset([])
UpperCamelCase__ :Optional[Any] = frozenset(["""batch_size"""])
UpperCamelCase__ :Optional[int] = frozenset([])
UpperCamelCase__ :str = frozenset(
    [
        """prompt""",
        """audio_length_in_s""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
UpperCamelCase__ :Union[str, Any] = frozenset(["""prompt""", """negative_prompt"""])
UpperCamelCase__ :Tuple = frozenset(["""input_tokens"""])
UpperCamelCase__ :int = frozenset(["""input_tokens"""])
| 355 | 0 |
"""simple docstring"""
def __snake_case(p: int) -> bool:
    """Lucas-Lehmer primality test: return True iff 2**p - 1 is a Mersenne prime.

    Valid for prime exponents ``p``; ``p == 2`` is handled as a special case
    (2**2 - 1 = 3 is prime).

    Raises:
        ValueError: if ``p`` is less than 2.
    """
    # Original bound every local to the same obfuscated name and referenced
    # the undefined names `p`, `s` and `m` — a NameError on every call.
    if p < 2:
        raise ValueError('p should not be less than 2!')
    if p == 2:
        return True
    # s_0 = 4; s_{i+1} = (s_i**2 - 2) mod (2**p - 1); 2**p - 1 is prime
    # iff s_{p-2} == 0.
    seq = 4
    mersenne = (1 << p) - 1
    for _ in range(p - 2):
        seq = ((seq * seq) - 2) % mersenne
    return seq == 0


if __name__ == "__main__":
    # Original called the undefined name `lucas_lehmer_test`.
    print(__snake_case(7))
    print(__snake_case(1_1))
| 707 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import scaffolding for the RoFormer model family (standard transformers
# pattern): map each submodule to the public names it defines, adding
# framework-specific entries only when that backend is installed.
# The original bound the dict AND every per-backend name list to the same
# obfuscated name `_snake_case` (each assignment clobbering the last), then
# referenced the never-defined `_import_structure` at the bottom — a NameError
# at import time. Restored to the canonical pattern.
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module
    # below performs them on first attribute access instead.
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    # Install the lazy proxy in place of this module so framework-heavy
    # submodules are imported only when first accessed (the original assigned
    # the proxy to a throwaway local, leaving it unused).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 491 | 0 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class _UpperCamelCase :
    """Builds a tiny EsmConfig plus dummy inputs/labels for the TF-ESM tests below.

    NOTE(review): many assignments in this class bind plain locals (e.g.
    ``UpperCamelCase_: Dict = parent``) where later methods read ``self.``
    attributes (``self.batch_size`` etc.), and ``parent`` is not defined here;
    the assignments appear to have lost their ``self.`` receivers during
    refactoring — confirm against the upstream TFEsmModelTester.
    """

    def __init__( self : Dict , snake_case_ : int , ):
        # Deliberately tiny model hyperparameters (hidden size 32, 2 layers,
        # 4 heads) so each test runs in well under a second.
        UpperCamelCase_: Dict = parent
        UpperCamelCase_: Any = 13
        UpperCamelCase_: Dict = 7
        UpperCamelCase_: Union[str, Any] = True
        UpperCamelCase_: Optional[Any] = True
        UpperCamelCase_: Dict = True
        UpperCamelCase_: List[str] = 99
        UpperCamelCase_: Optional[Any] = 32
        UpperCamelCase_: Union[str, Any] = 2
        UpperCamelCase_: Optional[Any] = 4
        UpperCamelCase_: List[str] = 37
        UpperCamelCase_: Any = 'gelu'
        UpperCamelCase_: Dict = 0.1
        UpperCamelCase_: List[str] = 0.1
        UpperCamelCase_: Union[str, Any] = 512
        UpperCamelCase_: Any = 16
        UpperCamelCase_: Optional[int] = 2
        UpperCamelCase_: str = 0.02
        UpperCamelCase_: str = 3
        UpperCamelCase_: int = 4
        UpperCamelCase_: Optional[int] = None

    def lowerCAmelCase__ ( self : Any ):
        # Builds (config, input_ids, input_mask, sequence_labels, token_labels,
        # choice_labels) with random contents of the tester's shapes.
        UpperCamelCase_: Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCamelCase_: Union[str, Any] = None
        if self.use_input_mask:
            UpperCamelCase_: Any = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCamelCase_: Optional[Any] = None
        UpperCamelCase_: Dict = None
        UpperCamelCase_: Any = None
        if self.use_labels:
            UpperCamelCase_: Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCamelCase_: Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            UpperCamelCase_: Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
        UpperCamelCase_: List[str] = EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
        # NOTE(review): the names returned here are never bound in this method
        # as written — likely lost tuple-unpacking targets in refactoring.
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def lowerCAmelCase__ ( self : List[Any] ):
        # Decoder variant: adds encoder hidden states and an encoder attention
        # mask to the standard inputs.
        (
            UpperCamelCase_
        ): List[Any] = self.prepare_config_and_inputs()
        UpperCamelCase_: str = True
        UpperCamelCase_: Dict = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        UpperCamelCase_: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        # NOTE(review): same unbound-names issue as above.
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : List[Any] , snake_case_ : List[Any] , snake_case_ : Optional[int] , snake_case_ : Optional[int] ):
        # Runs the base model with dict, list and positional call styles and
        # checks the output shape is (batch, seq, hidden).
        UpperCamelCase_: Optional[int] = TFEsmModel(config=snake_case_ )
        UpperCamelCase_: int = {'input_ids': input_ids, 'attention_mask': input_mask}
        UpperCamelCase_: Optional[int] = model(snake_case_ )
        UpperCamelCase_: Union[str, Any] = [input_ids, input_mask]
        UpperCamelCase_: Any = model(snake_case_ )
        UpperCamelCase_: Union[str, Any] = model(snake_case_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCAmelCase__ ( self : int , snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Tuple , ):
        # Cross-attention (decoder) variant of the model check.
        UpperCamelCase_: Optional[Any] = True
        UpperCamelCase_: Any = TFEsmModel(config=snake_case_ )
        UpperCamelCase_: Optional[Any] = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'encoder_hidden_states': encoder_hidden_states,
            'encoder_attention_mask': encoder_attention_mask,
        }
        UpperCamelCase_: Optional[int] = model(snake_case_ )
        UpperCamelCase_: List[Any] = [input_ids, input_mask]
        UpperCamelCase_: str = model(snake_case_ , encoder_hidden_states=snake_case_ )
        # Also check the case where encoder outputs are not passed
        UpperCamelCase_: Union[str, Any] = model(snake_case_ , attention_mask=snake_case_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCAmelCase__ ( self : str , snake_case_ : Dict , snake_case_ : Tuple , snake_case_ : Tuple , snake_case_ : List[str] , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] ):
        # Masked-LM head: logits over the vocabulary per position.
        UpperCamelCase_: List[Any] = TFEsmForMaskedLM(config=snake_case_ )
        UpperCamelCase_: Union[str, Any] = model([input_ids, input_mask] )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def lowerCAmelCase__ ( self : List[str] , snake_case_ : Optional[int] , snake_case_ : str , snake_case_ : Any , snake_case_ : Any , snake_case_ : str , snake_case_ : Tuple ):
        # Token-classification head: per-token label logits.
        UpperCamelCase_: List[str] = self.num_labels
        UpperCamelCase_: Tuple = TFEsmForTokenClassification(config=snake_case_ )
        UpperCamelCase_: Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
        UpperCamelCase_: Any = model(snake_case_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def lowerCAmelCase__ ( self : str ):
        # Shape shared test inputs into the (config, inputs_dict) pair that the
        # common test mixin expects.
        UpperCamelCase_: str = self.prepare_config_and_inputs()
        (
            UpperCamelCase_
        ): Optional[Any] = config_and_inputs
        UpperCamelCase_: Dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class _UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    """Common + pipeline test-suite entry points for the TF-ESM model family.

    NOTE(review): the two ``SCREAMING_SNAKE_CASE_`` bases are undefined in
    this file — presumably TFModelTesterMixin and PipelineTesterMixin (both
    imported above); confirm before running.
    """

    # Model classes exercised by the common tests (empty when TF is absent).
    __UpperCamelCase : List[Any] = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    # pipeline-task -> model-class mapping for the pipeline tests.
    __UpperCamelCase : Optional[int] = (
        {
            """feature-extraction""": TFEsmModel,
            """fill-mask""": TFEsmForMaskedLM,
            """text-classification""": TFEsmForSequenceClassification,
            """token-classification""": TFEsmForTokenClassification,
            """zero-shot""": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    __UpperCamelCase : int = False
    __UpperCamelCase : str = False

    def lowerCAmelCase__ ( self : List[Any] ):
        # NOTE(review): `TFEsmModelTester` and `snake_case_` are undefined
        # names here — the tester class above is (obfuscated to)
        # `_UpperCamelCase`; confirm against the original file.
        UpperCamelCase_: int = TFEsmModelTester(self )
        UpperCamelCase_: Optional[int] = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )

    def lowerCAmelCase__ ( self : List[str] ):
        self.config_tester.run_common_tests()

    def lowerCAmelCase__ ( self : Dict ):
        UpperCamelCase_: List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case_ )

    def lowerCAmelCase__ ( self : Tuple ):
        UpperCamelCase_: List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*snake_case_ )

    def lowerCAmelCase__ ( self : List[str] ):
        UpperCamelCase_: Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*snake_case_ )

    def lowerCAmelCase__ ( self : List[Any] ):
        UpperCamelCase_: Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*snake_case_ )

    @slow
    def lowerCAmelCase__ ( self : Tuple ):
        # Smoke-test loading the first published checkpoint from the Hub.
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase_: Any = TFEsmModel.from_pretrained(snake_case_ )
            self.assertIsNotNone(snake_case_ )

    @unittest.skip("""Protein models do not support embedding resizing.""" )
    def lowerCAmelCase__ ( self : Any ):
        pass

    @unittest.skip("""Protein models do not support embedding resizing.""" )
    def lowerCAmelCase__ ( self : Optional[int] ):
        pass

    def lowerCAmelCase__ ( self : Optional[int] ):
        # Checks the input/output embedding accessors; ESM's masked-LM bias is
        # a dict of variables rather than a layer.
        # NOTE(review): `model`, `name` and `x` are undefined names as written
        # — likely lost assignment targets in refactoring.
        UpperCamelCase_: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase_: Union[str, Any] = model_class(snake_case_ )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                UpperCamelCase_: Tuple = model.get_bias()
                assert isinstance(snake_case_ , snake_case_ )
                for k, v in name.items():
                    assert isinstance(snake_case_ , tf.Variable )
            else:
                UpperCamelCase_: Union[str, Any] = model.get_output_embeddings()
                assert x is None
                UpperCamelCase_: Dict = model.get_bias()
                assert name is None
@require_tf
class _UpperCamelCase ( unittest.TestCase ):
    """Slow integration tests against the real facebook/esm2_t6_8M_UR50D checkpoint.

    The expected slices below are regression values captured from a known-good
    run; tolerances are loose (1e-2 / 1e-4) to absorb framework drift.
    """

    @slow
    def lowerCAmelCase__ ( self : str ):
        # Masked-LM head: check logits shape (1, 6, vocab=33) and a 3x3 slice.
        UpperCamelCase_: str = TFEsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
        UpperCamelCase_: Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] )
        UpperCamelCase_: str = model(snake_case_ )[0]
        UpperCamelCase_: int = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape ) , snake_case_ )
        # compare the actual values for a slice.
        UpperCamelCase_: Dict = tf.constant(
            [
                [
                    [8.92_1518, -10.58_9814, -6.467_1307],
                    [-6.396_7156, -13.91_1377, -1.121_1915],
                    [-7.78_1247, -13.95_1557, -3.74_0592],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )

    @slow
    def lowerCAmelCase__ ( self : List[Any] ):
        # Base model: check a 3x3 slice of the last hidden state.
        UpperCamelCase_: int = TFEsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
        UpperCamelCase_: int = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        UpperCamelCase_: Any = model(snake_case_ )[0]
        # compare the actual values for a slice.
        UpperCamelCase_: int = tf.constant(
            [
                [
                    [0.1444_3092, 0.5412_5327, 0.324_7739],
                    [0.3034_0484, 0.0052_6676, 0.3107_7722],
                    [0.3227_8043, -0.2498_7096, 0.341_4628],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 548 |
"""simple docstring"""
from math import ceil, sqrt


def UpperCAmelCase_(limit: int = 1_00_00_00) -> int:
    """Project Euler 173: count hollow square laminae using up to ``limit`` tiles.

    A lamina with outer side ``o`` and square hole of side ``h`` (same parity,
    ``1 <= h <= o - 2``) uses ``o**2 - h**2`` tiles; for each outer width the
    admissible hole widths are counted directly.

    Args:
        limit: maximum number of tiles available (default one million).

    Returns:
        The number of distinct laminae that can be formed.
    """
    # Original named the parameter `__a` while the body read the undefined
    # name `limit`, and clobbered all locals into one obfuscated name.
    answer = 0
    # The thinnest lamina uses o**2 - (o - 2)**2 = 4*o - 4 tiles, bounding o.
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            # Smallest hole that keeps the tile count within the limit.
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # Outer and hole widths must share parity for a symmetric border.
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer


if __name__ == "__main__":
    # Original printed the undefined name `solution`.
    print(f"{UpperCAmelCase_() = }")
| 437 | 0 |
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    """The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , __lowerCamelCase , )
class A_ ( __lowerCamelCase ):
    """RoBERTa backbone for DeeBERT-style early exiting (DeeRoBERTa).

    Reuses the DeeBert machinery with RoBERTa embeddings and config.
    NOTE(review): the base name ``__lowerCamelCase`` is undefined in this file
    — presumably DeeBertModel (imported above) — and the embeddings instance
    below is bound to a plain local rather than ``self.embeddings``; confirm
    against the original module.
    """

    _UpperCamelCase : List[str] = RobertaConfig
    _UpperCamelCase : Dict = """roberta"""

    def __init__( self , snake_case ):
        super().__init__(snake_case )

        lowercase = RobertaEmbeddings(snake_case )
        self.init_weights()
@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """ , __lowerCamelCase , )
class A_ ( __lowerCamelCase ):
    """DeeRoBERTa sequence classifier with highway (early-exit) heads.

    Computes the final-layer classification loss plus one loss per highway
    exit; at inference an early exit raises HighwayException inside the
    backbone, which is caught here.
    NOTE(review): ``__lowerCamelCase`` and ``DeeRobertaModel`` are undefined
    names in this file (both classes here are obfuscated to ``A_``), and many
    assignments bind the plain local ``lowercase`` where attributes/locals
    with distinct names are clearly intended — confirm against the original.
    """

    _UpperCamelCase : List[str] = RobertaConfig
    _UpperCamelCase : List[str] = """roberta"""

    def __init__( self , snake_case ):
        super().__init__(snake_case )
        lowercase = config.num_labels
        lowercase = config.num_hidden_layers

        lowercase = DeeRobertaModel(snake_case )
        lowercase = nn.Dropout(config.hidden_dropout_prob )
        lowercase = nn.Linear(config.hidden_size , self.config.num_labels )

    @add_start_docstrings_to_model_forward(snake_case )
    def SCREAMING_SNAKE_CASE__ ( self , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=-1 , snake_case=False , ):
        # Forward pass: run the backbone (may raise HighwayException on early
        # exit), classify the pooled output, then accumulate per-exit losses.
        lowercase = self.num_layers
        try:
            lowercase = self.roberta(
                snake_case , attention_mask=snake_case , token_type_ids=snake_case , position_ids=snake_case , head_mask=snake_case , inputs_embeds=snake_case , )

            lowercase = outputs[1]

            lowercase = self.dropout(snake_case )
            lowercase = self.classifier(snake_case )
            lowercase = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # An intermediate layer decided to exit early; recover its output.
            lowercase = e.message
            lowercase = e.exit_layer
            lowercase = outputs[0]

        if not self.training:
            lowercase = entropy(snake_case )
            lowercase = []
            lowercase = []

        if labels is not None:
            # Final-layer loss: regression (MSE) when num_labels == 1,
            # otherwise multi-class cross entropy.
            if self.num_labels == 1:
                #  We are doing regression
                lowercase = MSELoss()
                lowercase = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                lowercase = CrossEntropyLoss()
                lowercase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )

            # work with highway exits
            lowercase = []
            for highway_exit in outputs[-1]:
                lowercase = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(snake_case )
                    highway_entropy.append(highway_exit[2] )
                # One loss per highway head, same regression/classification split.
                if self.num_labels == 1:
                    #  We are doing regression
                    lowercase = MSELoss()
                    lowercase = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    lowercase = CrossEntropyLoss()
                    lowercase = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(snake_case )

            if train_highway:
                lowercase = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                lowercase = (loss,) + outputs
        if not self.training:
            lowercase = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                lowercase = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 717 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( __lowerCamelCase ):
    """Scheduler tests for UnCLIPScheduler.

    Exercises config permutations, the two variance modes, and full denoising
    loops whose sums/means are checked against captured regression values.
    NOTE(review): the base ``__lowerCamelCase`` is undefined in this file —
    presumably SchedulerCommonTest (imported above); ``config`` in
    get_scheduler_config below is likewise an unbound name as written.
    """

    _UpperCamelCase : Optional[int] = (UnCLIPScheduler,)

    def SCREAMING_SNAKE_CASE__ ( self , **snake_case ):
        # Default scheduler config; keyword overrides are merged in.
        lowercase = {
            'num_train_timesteps': 1000,
            'variance_type': 'fixed_small_log',
            'clip_sample': True,
            'clip_sample_range': 1.0,
            'prediction_type': 'epsilon',
        }

        config.update(**snake_case )
        return config

    def SCREAMING_SNAKE_CASE__ ( self ):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=snake_case )

    def SCREAMING_SNAKE_CASE__ ( self ):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=snake_case )

    def SCREAMING_SNAKE_CASE__ ( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=snake_case )

    def SCREAMING_SNAKE_CASE__ ( self ):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=snake_case )

    def SCREAMING_SNAKE_CASE__ ( self ):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=snake_case )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # prev_timestep must precede time_step; skip invalid combinations.
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=snake_case , prev_timestep=snake_case )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Regression values for the fixed_small_log variance schedule.
        lowercase = self.scheduler_classes[0]
        lowercase = self.get_scheduler_config(variance_type='fixed_small_log' )
        lowercase = scheduler_class(**snake_case )

        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_000E-10 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1E-5

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Regression values for the learned_range variance schedule.
        lowercase = self.scheduler_classes[0]
        lowercase = self.get_scheduler_config(variance_type='learned_range' )
        lowercase = scheduler_class(**snake_case )

        lowercase = 0.5

        assert scheduler._get_variance(1 , predicted_variance=snake_case ) - -10.1_712_790 < 1E-5
        assert scheduler._get_variance(487 , predicted_variance=snake_case ) - -5.7_998_052 < 1E-5
        assert scheduler._get_variance(999 , predicted_variance=snake_case ) - -0.0_010_011 < 1E-5

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Full denoising loop over the training schedule; checks the summed and
        # mean absolute values of the final sample against captured numbers.
        lowercase = self.scheduler_classes[0]
        lowercase = self.get_scheduler_config()
        lowercase = scheduler_class(**snake_case )

        lowercase = scheduler.timesteps

        lowercase = self.dummy_model()
        lowercase = self.dummy_sample_deter
        lowercase = torch.manual_seed(0 )

        for i, t in enumerate(snake_case ):
            # 1. predict noise residual
            lowercase = model(snake_case , snake_case )

            # 2. predict previous mean of sample x_t-1
            lowercase = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample

            lowercase = pred_prev_sample

        lowercase = torch.sum(torch.abs(snake_case ) )
        lowercase = torch.mean(torch.abs(snake_case ) )

        assert abs(result_sum.item() - 252.2_682_495 ) < 1E-2
        assert abs(result_mean.item() - 0.3_284_743 ) < 1E-3

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Same loop with a truncated 25-step inference schedule, passing the
        # explicit prev_timestep on each step.
        lowercase = self.scheduler_classes[0]
        lowercase = self.get_scheduler_config()
        lowercase = scheduler_class(**snake_case )
        scheduler.set_timesteps(25 )

        lowercase = scheduler.timesteps

        lowercase = self.dummy_model()
        lowercase = self.dummy_sample_deter
        lowercase = torch.manual_seed(0 )

        for i, t in enumerate(snake_case ):
            # 1. predict noise residual
            lowercase = model(snake_case , snake_case )

            if i + 1 == timesteps.shape[0]:
                lowercase = None
            else:
                lowercase = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            lowercase = scheduler.step(
                snake_case , snake_case , snake_case , prev_timestep=snake_case , generator=snake_case ).prev_sample

            lowercase = pred_prev_sample

        lowercase = torch.sum(torch.abs(snake_case ) )
        lowercase = torch.mean(torch.abs(snake_case ) )

        assert abs(result_sum.item() - 258.2_044_983 ) < 1E-2
        assert abs(result_mean.item() - 0.3_362_038 ) < 1E-3

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Trained-betas variant not applicable to UnCLIP.
        pass

    def SCREAMING_SNAKE_CASE__ ( self ):
        # add_noise variant not applicable to UnCLIP.
        pass
| 565 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCAmelCase : Optional[Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class lowerCamelCase__ :
    """Dataset / data-loading arguments for the MAE pretraining script.

    NOTE(review): every field is bound to the same (name-mangled) attribute
    ``__a`` and the method below rebinds one local repeatedly while reading
    the undefined name ``data_files``; the original presumably used distinct
    field names and a ``__post_init__`` setting ``self.data_files`` — confirm
    before executing.
    """

    __a = field(
        default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
    __a = field(
        default=A , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
    __a = field(
        default=A , metadata={"""help""": """The column name of the images in the files."""} )
    __a = field(default=A , metadata={"""help""": """A folder containing the training data."""} )
    __a = field(default=A , metadata={"""help""": """A folder containing the validation data."""} )
    __a = field(
        default=0.1_5 , metadata={"""help""": """Percent to split off of train for validation."""} )
    __a = field(
        default=A , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        } , )
    __a = field(
        default=A , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        } , )

    def lowerCamelCase__ ( self : Union[str, Any] ):
        # Assemble the {"train": dir, "validation": dir} mapping from the
        # optional folder arguments (None when neither folder is given).
        __UpperCAmelCase : Any = {}
        if self.train_dir is not None:
            __UpperCAmelCase : Any = self.train_dir
        if self.validation_dir is not None:
            __UpperCAmelCase : Tuple = self.validation_dir
        __UpperCAmelCase : List[Any] = data_files if data_files else None
@dataclass
class lowerCamelCase__ :
    """Model / checkpoint arguments for the MAE pretraining script.

    Covers checkpoint selection, config overrides, cache/auth options, plus
    the MAE-specific mask ratio and pixel-normalization flag.
    NOTE(review): as above, all fields share the (mangled) name ``__a`` —
    only the last assignment survives at runtime; the originals presumably
    had distinct names.
    """

    __a = field(
        default=A , metadata={
            """help""": (
                """The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
            )
        } , )
    __a = field(
        default=A , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} )
    __a = field(
        default=A , metadata={
            """help""": (
                """Override some existing default config settings when a model is trained from scratch. Example: """
                """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
            )
        } , )
    __a = field(
        default=A , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
    __a = field(
        default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
    __a = field(default=A , metadata={"""help""": """Name or path of preprocessor config."""} )
    __a = field(
        default=A , metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        } , )
    __a = field(
        default=0.7_5 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} )
    __a = field(
        default=A , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} )
@dataclass
class lowerCamelCase__ ( A ):
    """TrainingArguments extended with a base learning rate.

    The effective rate is scaled by total batch size:
    absolute_lr = base_lr * total_batch_size / 256.
    """

    __a = field(
        default=1E-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} )
def lowerCamelCase(examples: list) -> dict:
    """Collate function: stack per-example ``pixel_values`` into one batch tensor.

    Args:
        examples: list of dicts, each with a ``pixel_values`` tensor of equal shape.

    Returns:
        ``{"pixel_values": tensor}`` with a leading batch dimension.
    """
    # Original named the parameter `_UpperCamelCase` while the comprehension
    # iterated the undefined name `examples` (NameError), and annotated the
    # dict return as `-> int`.
    pixel_values = torch.stack([example["""pixel_values"""] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    """Pre-train a ViT-MAE model (masked auto-encoding) on an image dataset.

    Reconstructed from the obfuscated original: the annotated tuple assignments
    (``a, b, c: T = ...``) were SyntaxErrors, every local was assigned to a
    write-only placeholder while later lines referenced the real names, and
    ``training_args.fpaa`` is corrected to ``training_args.fp16``.
    """
    # Parse command-line (or a single JSON file of) arguments into dataclasses.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        """Apply the MAE train-time transforms to every image in the batch."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    """Entry point for TPU multiprocessing via `xla_spawn`; `index` is the process index (unused)."""
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 139 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')

# Module-level logger; `main()` below references it as `logger` (the obfuscated
# assignment target `UpperCAmelCase` left that name undefined).
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune.

    Field names restored from the `model_args.*` attributes read in `main()`;
    the obfuscated duplicate `__a` fields shadowed each other and `default=A`
    was an undefined name.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Field names restored from the `data_args.*` attributes read in `main()`;
    the validator is restored to `__post_init__` so dataclass construction
    actually checks the file extensions (the obfuscated method name was never
    invoked by anything).
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Only csv/json data files are supported by the loading logic in main().
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice.

    Fixes relative to the obfuscated block: field names restored so
    `self.tokenizer` / `self.padding` / `self.max_length` /
    `self.pad_to_multiple_of` resolve (and the `tokenizer=...,
    pad_to_multiple_of=...` keyword construction in `main()` works);
    `torch.intaa` corrected to `torch.int64`; the labels tensor is stored in
    `batch["labels"]` instead of a dead local.
    """

    tokenizer: "PreTrainedTokenizerBase"
    padding: "Union[bool, str, PaddingStrategy]" = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        # One flat entry per (example, choice) pair so the tokenizer can pad them together.
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten back to (batch_size, num_choices, seq_len)
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
def main():
    """Fine-tune and evaluate a multiple-choice model (e.g. on SWAG).

    Reconstructed from the obfuscated original: the annotated tuple assignments
    (``a, b, c: T = ...``) were SyntaxErrors, locals were write-only while later
    lines referenced the real names, ``training_args.fpaa`` is corrected to
    ``fp16`` and ``np.floataa`` to ``np.float32``.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).

    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).

    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        """Expand each example into its four (context, ending) pairs and tokenize them."""
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        """Accuracy over the argmax of the per-choice scores."""
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    """Entry point for TPU multiprocessing via `xla_spawn`; `index` is the process index (unused)."""
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 139 | 1 |
"""simple docstring"""
def generate_large_matrix() -> list[list[int]]:
    """Build a 1000 x 2000 grid whose rows and columns are sorted in decreasing order.

    Row i is range(1000 - i, -1000 - i, -1); the name is restored from the
    module-level `generate_large_matrix()` call just below.
    """
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]
# `grid` is referenced by name in the test tuple below and in benchmark()'s
# timeit setup string, so the assignment target must be `grid` (the obfuscated
# `__lowerCAmelCase` left it undefined).
grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    """Assert that every row and every column of `grid` is sorted in decreasing order.

    Parameter name restored from the body's `grid` references; the obfuscated
    `sorted(lowercase_, reverse=lowercase_)` arguments are restored to
    `sorted(row/col, reverse=True)`.
    """
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    """Binary-search a descending-sorted array for the index of its first negative value.

    Returns len(array) when there are no negative numbers. The name is restored
    from the `find_negative_index` call in the binary-search counter below, and
    the parameter name from the body's `array` references.
    """
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives in a grid sorted decreasingly in both dimensions, O(m + n log n).

    Each row's search is bounded by the previous row's first-negative index,
    since columns are also sorted decreasingly. Name restored from the
    benchmark's timeit setup string; parameter name from the `grid` references.
    """
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        # The first negative in this row can only be at or before the previous bound.
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by scanning every cell (O(m * n) baseline for the benchmark)."""
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives per row, stopping at the first negative (rows are sorted decreasingly).

    Once a negative is found at index i, the rest of the row (len(row) - i
    values) is negative too.
    """
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    """Time the three counting implementations on the module-level large grid.

    Fixes: the setup string was assigned to a dead local while `timeit` was
    called with the undefined names `lowercase_` and `time`; the function name
    is restored from the `benchmark()` call in the __main__ guard.
    """
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 707 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Any =logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] ={
"""facebook/timesformer""": """https://huggingface.co/facebook/timesformer/resolve/main/config.json""",
}
class _A ( PretrainedConfig ):
    """Configuration for a TimeSformer-style video transformer.

    The original `def __init__` repeated the same obfuscated parameter name,
    which is a SyntaxError; the parameter names are restored from the attribute
    assignment order, `self.` (lost in obfuscation) is restored on each
    assignment, and the base class is restored to the `PretrainedConfig`
    imported at the top of this file.
    """

    # Model identifier used by the PretrainedConfig machinery.
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
| 197 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def a__(voltage: float, current: float, power: float) -> tuple:
    """Apply P = V * I: given any two of voltage, current and power (the unknown
    passed as 0), return a namedtuple naming the missing quantity and its value.

    The original `def` repeated the same parameter name three times (a
    SyntaxError); the names are restored from the body's `voltage`/`current`/
    `power` references, and the namedtuple is bound to the local `result` that
    the body actually calls.

    Raises ValueError if not exactly one argument is 0, or if power is negative.
    """
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        # Round to 2 decimal places for a clean reported power value.
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 75 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
"uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
"uclanlp/visualbert-vqa-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
"uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
"uclanlp/visualbert-vcr-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class lowercase ( PretrainedConfig ):
    """Configuration for a VisualBERT-style vision-and-language model.

    The original `def __init__` repeated the same obfuscated parameter name
    (a SyntaxError); the parameter names are restored from the attribute
    assignment order and the token-id keywords forwarded to the base `__init__`,
    `self.` is restored on each assignment, and the base class is restored to
    the `PretrainedConfig` imported at the top of this file.
    """

    # Model identifier used by the PretrainedConfig machinery.
    model_type = 'visual_bert'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 532 | 0 |
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
lowercase_ = ''''''
if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):
class A__ ( tr.AbstractTransform ):
def __init__( self , lowerCamelCase = " " ) -> List[str]:
"""simple docstring"""
__magic_name__ : List[str] = sentence_delimiter
def lowercase ( self , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
return list(lowerCamelCase )
def lowercase ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
__magic_name__ : Tuple = []
for sent_idx, sentence in enumerate(lowerCamelCase ):
chars.extend(self.process_string(lowerCamelCase ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowerCamelCase ) - 1:
chars.append(self.sentence_delimiter )
return chars
    # Old-jiwer path: compose the character-level transform by hand.
    # NOTE(review): ``SentencesToListOfCharacters`` and ``SENTENCE_DELIMITER``
    # are not defined under those names in this file (the class above is
    # ``A__`` and the delimiter constant is bound to ``lowercase_``) —
    # confirm against the original datasets ``cer`` metric module.
    lowercase_ = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    # jiwer >= 2.3.0 ships the sentence/character reducers natively.
    lowercase_ = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
lowercase_ = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
lowercase_ = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
lowercase_ = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
# Character Error Rate metric backed by jiwer.
# NOTE(review): ``_DESCRIPTION``/``_KWARGS_DESCRIPTION``/``_CITATION`` are not
# defined under those names in this file (the module constants are all bound
# to ``lowercase_``) — confirm against the original datasets ``cer`` metric.
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
    def lowercase ( self ) -> Optional[int]:
        """Return the MetricInfo (features, citation, reference URLs) for CER."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
                '''https://en.wikipedia.org/wiki/Word_error_rate''',
                '''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
            ] , )

    # NOTE(review): this second ``lowercase`` shadows the one above, and its
    # three parameters share one name (a SyntaxError).  ``datasets.Metric``
    # expects these hooks to be named ``_info`` and ``_compute`` — verify
    # against the original metric module.
    def lowercase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=False ) -> Optional[int]:
        """Compute the character error rate of predictions vs. references."""
        if concatenate_texts:
            # With the character-level transform applied, jiwer's "wer"
            # figure is the CER.  NOTE(review): the transforms here are the
            # call arguments, not the module-level transform chain — verify.
            return jiwer.compute_measures(
                lowerCamelCase , lowerCamelCase , truth_transform=lowerCamelCase , hypothesis_transform=lowerCamelCase , )["wer"]
        # Accumulate edit counts pair by pair, then divide once at the end.
        __magic_name__ : Optional[Any] = 0
        __magic_name__ : Tuple = 0
        for prediction, reference in zip(lowerCamelCase , lowerCamelCase ):
            __magic_name__ : Tuple = jiwer.compute_measures(
                lowerCamelCase , lowerCamelCase , truth_transform=lowerCamelCase , hypothesis_transform=lowerCamelCase , )
            # NOTE(review): ``incorrect``/``total``/``measures`` are never
            # bound above (results go to ``__magic_name__``) — verify.
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
| 336 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
lowercase_ = logging.getLogger(__name__)
@dataclass
class A__ :
    """Arguments for selecting the model/config/tokenizer to fine-tune.

    NOTE(review): every field is declared under the same name
    (``lowerCamelCase__``) so only the last annotation survives as a
    dataclass field, and the defaults reference ``__SCREAMING_SNAKE_CASE``,
    which is not defined in this file — confirm against the original
    ``run_swag.py`` ModelArguments.
    """
    # Path or hub id of the pretrained model (required, no default).
    lowerCamelCase__ : str =field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    lowerCamelCase__ : Optional[str] =field(
        default=__SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    lowerCamelCase__ : Optional[str] =field(
        default=__SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    lowerCamelCase__ : Optional[str] =field(
        default=__SCREAMING_SNAKE_CASE , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    lowerCamelCase__ : bool =field(
        default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
    lowerCamelCase__ : str =field(
        default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    lowerCamelCase__ : bool =field(
        default=__SCREAMING_SNAKE_CASE , metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
@dataclass
class A__ :
    """Arguments describing the training/evaluation data and preprocessing.

    NOTE(review): all fields share the name ``lowerCamelCase__`` (only the
    last survives as a dataclass field) and defaults reference the undefined
    ``__SCREAMING_SNAKE_CASE`` — confirm against the original
    ``run_swag.py`` DataTrainingArguments.
    """
    lowerCamelCase__ : Optional[str] =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "The input training data file (a text file)."} )
    lowerCamelCase__ : Optional[str] =field(
        default=__SCREAMING_SNAKE_CASE , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
    lowerCamelCase__ : bool =field(
        default=__SCREAMING_SNAKE_CASE , metadata={"help": "Overwrite the cached training and evaluation sets"} )
    lowerCamelCase__ : Optional[int] =field(
        default=__SCREAMING_SNAKE_CASE , metadata={"help": "The number of processes to use for the preprocessing."} , )
    lowerCamelCase__ : Optional[int] =field(
        default=__SCREAMING_SNAKE_CASE , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    lowerCamelCase__ : bool =field(
        default=__SCREAMING_SNAKE_CASE , metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        } , )
    lowerCamelCase__ : Optional[int] =field(
        default=__SCREAMING_SNAKE_CASE , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    lowerCamelCase__ : Optional[int] =field(
        default=__SCREAMING_SNAKE_CASE , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )

    # NOTE(review): this validator must be named ``__post_init__`` for the
    # dataclass machinery to run it automatically; under the name
    # ``lowercase`` it is never invoked — verify against the original script.
    def lowercase ( self ) -> Any:
        """Validate that train/validation files are csv or json."""
        if self.train_file is not None:
            __magic_name__ : List[Any] = self.train_file.split('''.''' )[-1]
            # NOTE(review): ``extension`` is never bound (result goes to
            # ``__magic_name__``), so this assert would raise NameError.
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            __magic_name__ : str = self.validation_file.split('''.''' )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class A__ :
    """Data collator that dynamically pads multiple-choice batches.

    Flattens the per-example choice dimension so the tokenizer can pad all
    candidate sequences together, then restores the
    ``(batch, num_choices, seq_len)`` shape and re-attaches the labels.

    NOTE(review): the original declared all four fields under the single name
    ``lowerCamelCase__`` (shadowing each other), so
    ``self.tokenizer``/``self.padding``/``self.max_length``/
    ``self.pad_to_multiple_of`` never existed, built the label tensor with
    the nonexistent dtype ``torch.intaa`` (restored to ``torch.int64``), and
    dropped the label tensor instead of adding it to the batch.
    """

    tokenizer: PreTrainedTokenizerBase            # tokenizer used for padding
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None      # e.g. 8 for fp16 tensor cores

    def __call__(self, features) -> Dict:
        # Labels may arrive under either key depending on the dataset.
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        # Flatten (batch, num_choices, ...) -> (batch * num_choices, ...).
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))
        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        # Un-flatten back to (batch, num_choices, seq_len).
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels as an int64 tensor, as expected by the model's loss.
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
def lowerCAmelCase ( ) ->Dict:
    """End-to-end training/evaluation driver for a multiple-choice (SWAG) model.

    Parses arguments, sets up logging, loads the dataset and model, tokenizes,
    trains, evaluates, and optionally pushes to the Hub.

    NOTE(review): throughout this function every result is bound to the
    throwaway name ``__magic_name__`` while later lines read the intended
    names (``parser``, ``training_args``, ``raw_datasets``, ``tokenizer``,
    ``trainer``, ...), so as written each step would raise NameError —
    confirm against the original ``run_swag.py`` example.  ``fpaa`` below is
    presumably the mangled ``fp16`` flag — verify.
    """
    # Parse CLI / JSON arguments into the three argument dataclasses.
    __magic_name__ : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        __magic_name__ , __magic_name__ , __magic_name__ : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        __magic_name__ , __magic_name__ , __magic_name__ : Union[str, Any] = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_swag''', UpperCAmelCase, UpperCAmelCase )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout )], )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    __magic_name__ : Tuple = training_args.get_process_log_level()
    logger.setLevel(UpperCAmelCase )
    datasets.utils.logging.set_verbosity(UpperCAmelCase )
    transformers.utils.logging.set_verbosity(UpperCAmelCase )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
    logger.info(F'''Training/evaluation parameters {training_args}''' )
    # Detecting last checkpoint.
    __magic_name__ : List[Any] = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        __magic_name__ : str = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                '''Use --overwrite_output_dir to overcome.''' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        __magic_name__ : int = {}
        if data_args.train_file is not None:
            __magic_name__ : Optional[int] = data_args.train_file
        if data_args.validation_file is not None:
            __magic_name__ : str = data_args.validation_file
        __magic_name__ : Union[str, Any] = data_args.train_file.split('''.''' )[-1]
        __magic_name__ : Optional[int] = load_dataset(
            UpperCAmelCase, data_files=UpperCAmelCase, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        # Downloading and loading the swag dataset from the hub.
        __magic_name__ : str = load_dataset(
            '''swag''', '''regular''', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # Load pretrained model and tokenizer
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    __magic_name__ : List[str] = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    __magic_name__ : Optional[Any] = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    __magic_name__ : List[Any] = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=UpperCAmelCase, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    __magic_name__ : int = [F'''ending{i}''' for i in range(4 )]
    __magic_name__ : List[Any] = '''sent1'''
    __magic_name__ : Union[str, Any] = '''sent2'''
    if data_args.max_seq_length is None:
        __magic_name__ : Union[str, Any] = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                '''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
                ''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
                ''' override this default with `--block_size xxx`.''' )
            __magic_name__ : Optional[Any] = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
                F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
        __magic_name__ : Dict = min(data_args.max_seq_length, tokenizer.model_max_length )
    # Preprocessing the datasets.
    def preprocess_function(UpperCAmelCase ):
        # Repeat each context 4 times (one per candidate ending) and pair it
        # with the corresponding ending text before tokenizing.
        __magic_name__ : Optional[Any] = [[context] * 4 for context in examples[context_name]]
        __magic_name__ : List[Any] = examples[question_header_name]
        __magic_name__ : List[Any] = [
            [F'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(UpperCAmelCase )
        ]
        # Flatten out
        __magic_name__ : int = list(chain(*UpperCAmelCase ) )
        __magic_name__ : Tuple = list(chain(*UpperCAmelCase ) )
        # Tokenize
        __magic_name__ : List[Any] = tokenizer(
            UpperCAmelCase, UpperCAmelCase, truncation=UpperCAmelCase, max_length=UpperCAmelCase, padding='''max_length''' if data_args.pad_to_max_length else False, )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(UpperCAmelCase ), 4 )] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('''--do_train requires a train dataset''' )
        __magic_name__ : int = raw_datasets['''train''']
        if data_args.max_train_samples is not None:
            # Optionally subsample for debugging/quick runs.
            __magic_name__ : Union[str, Any] = min(len(UpperCAmelCase ), data_args.max_train_samples )
            __magic_name__ : int = train_dataset.select(range(UpperCAmelCase ) )
        with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
            __magic_name__ : Optional[int] = train_dataset.map(
                UpperCAmelCase, batched=UpperCAmelCase, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError('''--do_eval requires a validation dataset''' )
        __magic_name__ : Dict = raw_datasets['''validation''']
        if data_args.max_eval_samples is not None:
            __magic_name__ : Dict = min(len(UpperCAmelCase ), data_args.max_eval_samples )
            __magic_name__ : int = eval_dataset.select(range(UpperCAmelCase ) )
        with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
            __magic_name__ : Optional[Any] = eval_dataset.map(
                UpperCAmelCase, batched=UpperCAmelCase, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
    # Data collator
    __magic_name__ : List[str] = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=UpperCAmelCase, pad_to_multiple_of=8 if training_args.fpaa else None )
    )
    # Metric
    def compute_metrics(UpperCAmelCase ):
        # Accuracy over the argmax of the per-choice logits.
        __magic_name__ , __magic_name__ : str = eval_predictions
        __magic_name__ : List[Any] = np.argmax(UpperCAmelCase, axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
    # Initialize our Trainer
    __magic_name__ : Tuple = Trainer(
        model=UpperCAmelCase, args=UpperCAmelCase, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=UpperCAmelCase, data_collator=UpperCAmelCase, compute_metrics=UpperCAmelCase, )
    # Training
    if training_args.do_train:
        __magic_name__ : Union[str, Any] = None
        if training_args.resume_from_checkpoint is not None:
            __magic_name__ : Optional[Any] = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            __magic_name__ : Union[str, Any] = last_checkpoint
        __magic_name__ : Tuple = trainer.train(resume_from_checkpoint=UpperCAmelCase )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        __magic_name__ : List[Any] = train_result.metrics
        __magic_name__ : List[Any] = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCAmelCase )
        )
        __magic_name__ : List[Any] = min(UpperCAmelCase, len(UpperCAmelCase ) )
        trainer.log_metrics('''train''', UpperCAmelCase )
        trainer.save_metrics('''train''', UpperCAmelCase )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        __magic_name__ : int = trainer.evaluate()
        __magic_name__ : List[str] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCAmelCase )
        __magic_name__ : str = min(UpperCAmelCase, len(UpperCAmelCase ) )
        trainer.log_metrics('''eval''', UpperCAmelCase )
        trainer.save_metrics('''eval''', UpperCAmelCase )
    # Model-card / Hub metadata describing this fine-tuning run.
    __magic_name__ : Union[str, Any] = {
        '''finetuned_from''': model_args.model_name_or_path,
        '''tasks''': '''multiple-choice''',
        '''dataset_tags''': '''swag''',
        '''dataset_args''': '''regular''',
        '''dataset''': '''SWAG''',
        '''language''': '''en''',
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**UpperCAmelCase )
    else:
        trainer.create_model_card(**UpperCAmelCase )
def lowerCAmelCase ( UpperCAmelCase ) ->Any:
    """Entry-point wrapper (e.g. for ``xla_spawn``); the argument is ignored.

    NOTE(review): this redefinition shadows the training driver of the same
    name above, and ``main`` is not defined anywhere in this file (the driver
    was renamed) — confirm the intended call target.
    """
    main()


if __name__ == "__main__":
    # NOTE(review): ``main`` is undefined here as well — see note above.
    main()
| 336 | 1 |
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase ( __magic_name__ : list[list[int]] ) -> int:
# preprocessing the first row
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(__magic_name__ ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(__magic_name__ ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 92 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
snake_case__ = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
# Copy a single fairseq tensor into the matching attribute of the HF model.
# NOTE(review): all five parameters share one name (a SyntaxError in Python)
# and the body reads ``key``/``hf_pointer``/``value``/``weight_type``/
# ``full_name`` while binding every result to the throwaway local
# ``lowercase`` — confirm against ``set_recursively`` in the original WavLM
# conversion script.
def lowerCamelCase_ ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict ):
    # Walk the dotted key path down the HF module tree.
    # NOTE(review): the traversal result is discarded — ``hf_pointer`` is
    # never advanced.
    for attribute in key.split('''.''' ):
        lowercase : Union[str, Any] = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
    if weight_type is not None:
        lowercase : str = getattr(UpperCAmelCase_ , UpperCAmelCase_ ).shape
    else:
        lowercase : Tuple = hf_pointer.shape
    # Shape sanity check before overwriting the HF parameter.
    assert hf_shape == value.shape, (
        f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )
    # Dispatch on the kind of tensor (plain weight, weight-norm g/v, bias).
    if weight_type == "weight":
        lowercase : str = value
    elif weight_type == "weight_g":
        lowercase : Optional[Any] = value
    elif weight_type == "weight_v":
        lowercase : Optional[int] = value
    elif weight_type == "bias":
        lowercase : int = value
    else:
        lowercase : List[str] = value
    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
# Map every tensor of the fairseq WavLM state dict onto the HF model.
# NOTE(review): both parameters share one name (a SyntaxError) — presumably
# ``(fairseq_model, hf_model)``; verify against the original
# ``recursively_load_weights``.
def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple ):
    lowercase : Union[str, Any] = []
    lowercase : Union[str, Any] = fairseq_model.state_dict()
    lowercase : List[Any] = hf_model.feature_extractor
    # NOTE(review): the three locals above are all named ``lowercase`` (each
    # rebinding the previous), yet the loop reads ``fairseq_dict``/
    # ``feature_extractor``/``unused_weights`` — verify.
    for name, value in fairseq_dict.items():
        lowercase : List[Any] = False
        if "conv_layers" in name:
            # Conv feature-extractor weights take a dedicated loader.
            load_conv_layer(
                UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , hf_model.config.feat_extract_norm == '''group''' , )
            lowercase : int = True
        else:
            # Translate the remaining fairseq names via the MAPPING table.
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    lowercase : Any = True
                    if "*" in mapped_key:
                        # The wildcard stands for the encoder layer index.
                        lowercase : Tuple = name.split(UpperCAmelCase_ )[0].split('''.''' )[-2]
                        lowercase : List[Any] = mapped_key.replace('''*''' , UpperCAmelCase_ )
                    if "weight_g" in name:
                        lowercase : int = '''weight_g'''
                    elif "weight_v" in name:
                        lowercase : Tuple = '''weight_v'''
                    elif "bias" in name and "relative_attention_bias" not in name:
                        lowercase : Union[str, Any] = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        lowercase : Optional[int] = '''weight'''
                    else:
                        lowercase : List[str] = None
                    set_recursively(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
                continue
        if not is_used:
            # NOTE(review): ``is_used``/``unused_weights`` are never bound
            # under those names above — verify.
            unused_weights.append(UpperCAmelCase_ )
    logger.warning(f'''Unused weights: {unused_weights}''' )
# Load one fairseq conv-feature-extractor tensor into the HF model.
# NOTE(review): all five parameters share one name (a SyntaxError) —
# presumably ``(full_name, value, feature_extractor, unused_weights,
# use_group_norm)``; the body reads those names while binding results to the
# throwaway ``lowercase`` — verify against the original ``load_conv_layer``.
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple ):
    # Parse "conv_layers.<layer_id>.<type_id>...." out of the fairseq name.
    lowercase : Optional[Any] = full_name.split('''conv_layers.''' )[-1]
    lowercase : List[str] = name.split('''.''' )
    lowercase : List[str] = int(items[0] )
    lowercase : Dict = int(items[1] )
    # type_id 0 = the conv itself; type_id 2 = the (group/layer) norm.
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            lowercase : Optional[int] = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            lowercase : Dict = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            lowercase : List[Any] = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            lowercase : Dict = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        # Anything else is recorded as unused for the final warning.
        unused_weights.append(UpperCAmelCase_ )
# Convert a fairseq WavLM checkpoint to a HF WavLMModel and save it.
# NOTE(review): the three parameters share one name (a SyntaxError) —
# presumably ``(checkpoint_path, pytorch_dump_folder_path, config_path=None)``
# given the call at the bottom of the file; the body reads ``checkpoint``/
# ``model``/``hf_wavlm`` while binding to the throwaway ``lowercase`` —
# verify against the original ``convert_wavlm_checkpoint``.
@torch.no_grad()
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int]=None ):
    # load the pre-trained checkpoints
    lowercase : Tuple = torch.load(UpperCAmelCase_ )
    lowercase : Any = WavLMConfigOrig(checkpoint['''cfg'''] )
    lowercase : Any = WavLMOrig(UpperCAmelCase_ )
    model.load_state_dict(checkpoint['''model'''] )
    model.eval()
    # Use the provided HF config if any, otherwise the library defaults.
    if config_path is not None:
        lowercase : Optional[int] = WavLMConfig.from_pretrained(UpperCAmelCase_ )
    else:
        lowercase : List[Any] = WavLMConfig()
    lowercase : Tuple = WavLMModel(UpperCAmelCase_ )
    recursively_load_weights(UpperCAmelCase_ , UpperCAmelCase_ )
    hf_wavlm.save_pretrained(UpperCAmelCase_ )
# CLI entry point: parse the checkpoint/config/output paths and convert.
if __name__ == "__main__":
    snake_case__ = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    # NOTE(review): the parser/args are bound to ``snake_case__`` but read as
    # ``parser``/``args`` — verify against the original conversion script.
    snake_case__ = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 583 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Standard HF lazy-import boilerplate for the Timesformer model package:
# declare the import structure, optionally extend it with the torch-backed
# modules, and at runtime replace this module with a _LazyModule.
# NOTE(review): the structure dict is bound to ``__magic_name__`` but the
# final _LazyModule call reads ``_import_structure`` — verify against the
# original ``__init__.py``.
__magic_name__ = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is absent: expose only the configuration symbols.
    pass
else:
    __magic_name__ = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )
else:
    # At runtime the module is swapped for a lazy loader.
    import sys

    __magic_name__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 715 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class lowercase :
'''simple docstring'''
    # Model-tester constructor storing the hyper-parameters used to build
    # small BeiT configs/inputs for the unit tests.
    # NOTE(review): every parameter is named ``_snake_case`` (duplicate
    # argument names are a SyntaxError) and every value is bound to the same
    # local ``UpperCAmelCase`` rather than to attributes on ``self``, so the
    # sibling methods' ``self.*`` reads would fail — verify against the
    # original BeitModelTester.  Also note the mutable list default
    # (``[0, 1, 2, 3]``), which is shared across calls.
    def __init__( self , _snake_case , _snake_case=100 , _snake_case=13 , _snake_case=30 , _snake_case=2 , _snake_case=3 , _snake_case=True , _snake_case=True , _snake_case=32 , _snake_case=4 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=10 , _snake_case=0.02 , _snake_case=3 , _snake_case=None , _snake_case=[0, 1, 2, 3] , ) -> Tuple:
        """Store test hyper-parameters and derive the expected sequence length."""
        UpperCAmelCase = parent
        UpperCAmelCase = 100
        UpperCAmelCase = batch_size
        UpperCAmelCase = image_size
        UpperCAmelCase = patch_size
        UpperCAmelCase = num_channels
        UpperCAmelCase = is_training
        UpperCAmelCase = use_labels
        UpperCAmelCase = hidden_size
        UpperCAmelCase = num_hidden_layers
        UpperCAmelCase = num_attention_heads
        UpperCAmelCase = intermediate_size
        UpperCAmelCase = hidden_act
        UpperCAmelCase = hidden_dropout_prob
        UpperCAmelCase = attention_probs_dropout_prob
        UpperCAmelCase = type_sequence_label_size
        UpperCAmelCase = initializer_range
        UpperCAmelCase = scope
        UpperCAmelCase = out_indices
        UpperCAmelCase = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        UpperCAmelCase = (image_size // patch_size) ** 2
        UpperCAmelCase = num_patches + 1
def snake_case_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase = self.get_config()
return config, pixel_values, labels, pixel_labels
def snake_case_ ( self ) -> int:
"""simple docstring"""
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_snake_case , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def snake_case_ ( self , _snake_case , _snake_case , _snake_case , _snake_case ) -> Any:
"""simple docstring"""
UpperCAmelCase = BeitModel(config=_snake_case )
model.to(_snake_case )
model.eval()
UpperCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self , _snake_case , _snake_case , _snake_case , _snake_case ) -> Dict:
"""simple docstring"""
UpperCAmelCase = BeitForMaskedImageModeling(config=_snake_case )
model.to(_snake_case )
model.eval()
UpperCAmelCase = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def snake_case_ ( self , _snake_case , _snake_case , _snake_case , _snake_case ) -> str:
"""simple docstring"""
UpperCAmelCase = self.type_sequence_label_size
UpperCAmelCase = BeitForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
UpperCAmelCase = model(_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase = 1
UpperCAmelCase = BeitForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase = model(_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def snake_case_ ( self , _snake_case , _snake_case , _snake_case , _snake_case ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = self.num_labels
UpperCAmelCase = BeitForSemanticSegmentation(_snake_case )
model.to(_snake_case )
model.eval()
UpperCAmelCase = model(_snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
UpperCAmelCase = model(_snake_case , labels=_snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def snake_case_ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-API tests for the BeiT model family.

    NOTE(review): the obfuscated original named every method ``snake_case_`` (each
    definition shadowed the previous one and none carried the ``test_`` prefix
    unittest discovery requires) and left the mixin base classes as the undefined
    name ``A__``. Names are restored to the conventions the imported mixins expect.
    """

    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def _lowerCAmelCase():
    """Load the local COCO fixture image used by the integration tests below."""
    # The obfuscated original bound the opened image to a throwaway local and then
    # returned the undefined name `image`; the result is now actually returned.
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


# Integration tests call this helper by its conventional name.
prepare_img = _lowerCAmelCase
@require_torch
@require_vision
class lowercase(unittest.TestCase):
    """Slow integration tests running the released BeiT checkpoints on real images.

    NOTE(review): method names restored from the obfuscated ``snake_case_``
    duplicates (which shadowed each other and were invisible to unittest).
    """

    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        # Pillow changed its resampling between major versions, which shifts the
        # reference values slightly.
        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")
        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        # post-processing with an explicit target size, then with the default size
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
# --- (stray dataset-concatenation residue removed; not valid Python) ---
def _SCREAMING_SNAKE_CASE(x: float) -> float:
    """The target function whose root we search for: f(x) = 10 - x^2."""
    return 10 - x * x


# The bisection routine below calls this function by the name `equation`,
# which the obfuscated original never defined; the next `def` also shadows
# `_SCREAMING_SNAKE_CASE`, so this alias is the only surviving handle.
equation = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE(a: float, b: float) -> float:
    """Find a root of `equation` inside [a, b] with the bisection method.

    The interval must bracket a sign change (Bolzano's theorem then guarantees
    a root). Iterates until the bracket is narrower than 0.01.

    Raises:
        ValueError: if equation(a) and equation(b) do not have opposite signs.
    """
    # NOTE(review): the obfuscated original declared two parameters with the
    # same name (a SyntaxError) and read undefined locals; names restored.
    # Bolzano theory in order to find if there is a root between a and b
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


# The module-level demo code calls this by the name `bisection`.
bisection = _SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Demo: the roots of 10 - x^2 are at ±sqrt(10) ≈ ±3.162
    print(bisection(-2, 5))
    print(bisection(0, 6))
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Reference Keras constructor for each EfficientNet variant.
# NOTE(review): the obfuscated original bound both dicts to the same name
# `_UpperCAmelCase` (so the `model_classes`/`CONFIG_MAP` names used below were
# undefined) and garbled every constructor to the nonexistent `EfficientNetBa`.
model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

# Per-variant architecture hyper-parameters.
CONFIG_MAP = {
    "b0": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.0,
        "image_size": 224,
        "dropout_rate": 0.2,
        "dw_padding": [],
    },
    "b1": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.1,
        "image_size": 240,
        "dropout_rate": 0.2,
        "dw_padding": [16],
    },
    "b2": {
        "hidden_dim": 1408,
        "width_coef": 1.1,
        "depth_coef": 1.2,
        "image_size": 260,
        "dropout_rate": 0.3,
        "dw_padding": [5, 8, 16],
    },
    "b3": {
        "hidden_dim": 1536,
        "width_coef": 1.2,
        "depth_coef": 1.4,
        "image_size": 300,
        "dropout_rate": 0.3,
        "dw_padding": [5, 18],
    },
    "b4": {
        "hidden_dim": 1792,
        "width_coef": 1.4,
        "depth_coef": 1.8,
        "image_size": 380,
        "dropout_rate": 0.4,
        "dw_padding": [6],
    },
    "b5": {
        "hidden_dim": 2048,
        "width_coef": 1.6,
        "depth_coef": 2.2,
        "image_size": 456,
        "dropout_rate": 0.4,
        "dw_padding": [13, 27],
    },
    "b6": {
        "hidden_dim": 2304,
        "width_coef": 1.8,
        "depth_coef": 2.6,
        "image_size": 528,
        "dropout_rate": 0.5,
        "dw_padding": [31],
    },
    "b7": {
        "hidden_dim": 2560,
        "width_coef": 2.0,
        "depth_coef": 3.1,
        "image_size": 600,
        "dropout_rate": 0.5,
        "dw_padding": [18],
    },
}

# Preserve the original module-level binding (it ended up pointing at the
# config dict) in case anything else references it.
_UpperCAmelCase = CONFIG_MAP
def _SCREAMING_SNAKE_CASE(model_name):
    """Build an `EfficientNetConfig` for the given variant name (e.g. "b0").

    Architecture hyper-parameters come from CONFIG_MAP; the ImageNet-1k label
    mapping is downloaded from the Hugging Face Hub.
    """
    # NOTE(review): the obfuscated original bound every value to a throwaway
    # local instead of setting config attributes, and read `idalabel` before
    # defining it; attribute assignments restored.
    config = EfficientNetConfig()

    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config


get_efficientnet_config = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE():
    """Download the standard COCO cats test image used for output verification."""
    # NOTE(review): the original bound the URL to a throwaway local and then
    # passed an undefined name to requests.get; the URL is now actually used.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


prepare_img = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE(model_name):
    """Build the `EfficientNetImageProcessor` matching the variant's input resolution."""
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        # ImageNet mean / (model-specific) std normalization constants
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor


convert_image_processor = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE(original_param_names):
    """Map original TF parameter names to HF `EfficientNetForImageClassification` keys.

    Returns a dict ``{tf_name: hf_name}`` covering the stem, every block present
    in ``original_param_names``, the top conv/bn and the classification head.
    """
    # Extract block identifiers such as "1a", "2b", ... and index them 0..N-1.
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight"))
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight"))
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias"))
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean"))
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var"))

    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight"))
        rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight"))
        rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias"))
        rename_keys.append(
            (f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var")
        )
        rename_keys.append(
            (f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight")
        )
        rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight"))
        rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias"))
        rename_keys.append(
            (f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean")
        )
        rename_keys.append(
            (f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var")
        )
        rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight"))
        rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias"))
        rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight"))
        rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias"))
        rename_keys.append(
            (f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight")
        )
        rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight"))
        rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias"))
        rename_keys.append(
            (f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var")
        )

    rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight"))
    rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight"))
    rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias"))
    rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean"))
    rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var"))

    # NOTE(review): the obfuscated original assigned the mapped names to
    # throwaway locals, so the returned dict was never populated; restored.
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping


rename_keys = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE(hf_params, tf_params, key_mapping):
    """Copy original TF weights into the HF state dict in place.

    Conv kernels are permuted from TF's HWIO layout to PyTorch's OIHW (and the
    depthwise variant to its PyTorch layout); dense kernels are transposed.
    Keys containing "normalization" are skipped.
    """
    # NOTE(review): the obfuscated original declared three parameters with the
    # same name (a SyntaxError) and bound converted tensors to throwaway
    # locals; parameter/local names restored.
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)


replace_params = _SCREAMING_SNAKE_CASE
@torch.no_grad()
def _SCREAMING_SNAKE_CASE(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """Convert an original Keras EfficientNet checkpoint to the HF format.

    Loads the reference Keras model, copies its weights into a freshly built
    `EfficientNetForImageClassification`, verifies both models produce the same
    logits on a test image, and optionally saves and/or pushes the result.
    """
    # NOTE(review): the obfuscated original declared duplicate parameter names
    # (a SyntaxError) and bound every intermediate to throwaway locals;
    # parameter/local names restored.
    # Load the original (reference) Keras model.
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    # Collect trainable and non-trainable TF parameters as numpy arrays.
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        repo_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(repo_name)
        hf_model.push_to_hub(repo_name)


convert_efficientnet_checkpoint = _SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="b0",
        type=str,
        help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="hf_model",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")

    # NOTE(review): the original bound the parse result to `_UpperCAmelCase`
    # but then read the undefined name `args`; fixed.
    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
    raise Exception("requires gluonnlp == 0.8.3")

if version.parse(mx.__version__) != version.parse("1.5.0"):
    raise Exception("requires mxnet == 1.5.0")

logging.set_verbosity_info()
# NOTE(review): the original bound both the logger and the sample sentence to
# the same name `__A`, discarding the logger handle; keep both, preserving the
# original final binding of `__A` (the sample text) for later references.
logger = logging.get_logger(__name__)
SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
__A = SAMPLE_TEXT
def lowerCAmelCase_ ( __a , __a ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase__: str ={
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 1024,
'''hidden_size''': 768,
'''max_length''': 512,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 1024,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1e-5,
'''token_type_vocab_size''': 2,
}
lowerCamelCase__: str =bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
lowerCamelCase__: Dict =BERTEncoder(
attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=lowerCAmelCase_ , output_all_encodings=lowerCAmelCase_ , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , lowerCAmelCase_ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
lowerCamelCase__: int ='''openwebtext_ccnews_stories_books_cased'''
# Specify download folder to Gluonnlp's vocab
lowerCamelCase__: str =os.path.join(get_home_dir() , "models" )
lowerCamelCase__: Optional[int] =_load_vocab(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , cls=lowerCAmelCase_ )
lowerCamelCase__: List[str] =nlp.model.BERTModel(
lowerCAmelCase_ , len(lowerCAmelCase_ ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=lowerCAmelCase_ , use_token_type_embed=lowerCAmelCase_ , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=lowerCAmelCase_ , use_decoder=lowerCAmelCase_ , )
original_bort.load_parameters(lowerCAmelCase_ , cast_dtype=lowerCAmelCase_ , ignore_extra=lowerCAmelCase_ )
lowerCamelCase__: List[Any] =original_bort._collect_params_with_prefix()
# Build our config 🤗
lowerCamelCase__: Union[str, Any] ={
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.0_2,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
'''vocab_size''': len(lowerCAmelCase_ ),
}
lowerCamelCase__: Union[str, Any] =BertConfig.from_dict(lowerCAmelCase_ )
lowerCamelCase__: List[str] =BertForMaskedLM(lowerCAmelCase_ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(__a ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(__a , __a ):
lowerCamelCase__: Dict =hf_param.shape
lowerCamelCase__: Union[str, Any] =to_torch(params[gluon_param] )
lowerCamelCase__: str =gluon_param.shape
assert (
shape_hf == shape_gluon
), F"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
return gluon_param
lowerCamelCase__: Union[str, Any] =check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" )
lowerCamelCase__: Union[str, Any] =check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" )
lowerCamelCase__: int =check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" )
lowerCamelCase__: int =check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
lowerCamelCase__: Tuple =torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
lowerCamelCase__: BertLayer =hf_bort_model.bert.encoder.layer[i]
# self attention
lowerCamelCase__: BertSelfAttention =layer.attention.self
lowerCamelCase__: List[str] =check_and_map_params(
self_attn.key.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" )
lowerCamelCase__: List[str] =check_and_map_params(
self_attn.key.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" )
lowerCamelCase__: int =check_and_map_params(
self_attn.query.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" )
lowerCamelCase__: List[Any] =check_and_map_params(
self_attn.query.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" )
lowerCamelCase__: List[str] =check_and_map_params(
self_attn.value.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" )
lowerCamelCase__: Union[str, Any] =check_and_map_params(
self_attn.value.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" )
# self attention output
lowerCamelCase__: BertSelfOutput =layer.attention.output
lowerCamelCase__: Union[str, Any] =check_and_map_params(
self_output.dense.bias , F"""encoder.transformer_cells.{i}.proj.bias""" )
lowerCamelCase__: str =check_and_map_params(
self_output.dense.weight , F"""encoder.transformer_cells.{i}.proj.weight""" )
lowerCamelCase__: Any =check_and_map_params(
self_output.LayerNorm.bias , F"""encoder.transformer_cells.{i}.layer_norm.beta""" )
lowerCamelCase__: Tuple =check_and_map_params(
self_output.LayerNorm.weight , F"""encoder.transformer_cells.{i}.layer_norm.gamma""" )
# intermediate
lowerCamelCase__: BertIntermediate =layer.intermediate
lowerCamelCase__: List[str] =check_and_map_params(
intermediate.dense.bias , F"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" )
lowerCamelCase__: Tuple =check_and_map_params(
intermediate.dense.weight , F"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" )
# output
lowerCamelCase__: BertOutput =layer.output
lowerCamelCase__: str =check_and_map_params(
bert_output.dense.bias , F"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" )
lowerCamelCase__: str =check_and_map_params(
bert_output.dense.weight , F"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" )
lowerCamelCase__: Optional[Any] =check_and_map_params(
bert_output.LayerNorm.bias , F"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" )
lowerCamelCase__: int =check_and_map_params(
bert_output.LayerNorm.weight , F"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
lowerCamelCase__: Optional[Any] =RobertaTokenizer.from_pretrained("roberta-base" )
lowerCamelCase__: Union[str, Any] =tokenizer.encode_plus(lowerCAmelCase_ )['''input_ids''']
# Get gluon output
lowerCamelCase__: Tuple =mx.nd.array([input_ids] )
lowerCamelCase__: int =original_bort(inputs=lowerCAmelCase_ , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(lowerCAmelCase_ )
lowerCamelCase__: Tuple =BertModel.from_pretrained(lowerCAmelCase_ )
hf_bort_model.eval()
lowerCamelCase__: Dict =tokenizer.encode_plus(lowerCAmelCase_ , return_tensors="pt" )
lowerCamelCase__: List[str] =hf_bort_model(**lowerCAmelCase_ )[0]
lowerCamelCase__: Union[str, Any] =output_gluon[0].asnumpy()
lowerCamelCase__: Dict =output_hf[0].detach().numpy()
lowerCamelCase__: Dict =np.max(np.abs(hf_layer - gluon_layer ) ).item()
lowerCamelCase__: int =np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3 )
if success:
print("✔️ Both model do output the same tensors" )
else:
print("❌ Both model do **NOT** output the same tensors" )
print("Absolute difference is:" , lowerCAmelCase_ )
if __name__ == "__main__":
    # Fix: the parser was assigned to `__A` while the add_argument/parse_args
    # calls read `parser`, and the parsed namespace was assigned to `__A`
    # while the converter call reads `args` — both raised NameError.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
# | 703 |  (stray dataset-row separator — commented out so the module parses)
def lowerCAmelCase_(density: float, bulk_modulus: float) -> float:
    """Return the speed of sound (in m/s) in a fluid.

    Uses the Newton-Laplace formula: c = sqrt(bulk_modulus / density).

    Args:
        density: fluid density in kg/m^3; must be > 0.
        bulk_modulus: bulk modulus in Pa; must be > 0.

    Raises:
        ValueError: if density or bulk modulus is not positive.

    >>> lowerCAmelCase_(1, 4)
    2.0
    """
    # Fix: the original declared both parameters as `__a` (a SyntaxError) while
    # the body read the undefined names `density` and `bulk_modulus`.
    if density <= 0:
        raise ValueError("Impossible fluid density" )
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus" )
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
# | 437 | 0 |  (stray dataset-row separator — commented out so the module parses)
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
# Configure root logging once at import time; all module loggers inherit it.
logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
# Fix: the module logger was bound to `lowerCamelCase`, but every call site in
# this script reads `logger` (e.g. logger.info(...)) — a NameError as written.
logger = logging.getLogger(__name__)
def accuracy(out, labels) -> int:
    """Count correct predictions: argmax over the class axis vs. integer labels.

    Renamed from the mangled ``SCREAMING_SNAKE_CASE``: main() below calls this
    helper as ``accuracy``. The original also declared two parameters with the
    same name (a SyntaxError) and read undefined names in the body.

    Args:
        out: (n, num_classes) array of logits/probabilities.
        labels: (n,) array of integer class labels.
    Returns:
        Number of rows where argmax(out) equals the label.
    """
    predictions = np.argmax(out, axis=1)
    return np.sum(predictions == labels)
def load_rocstories_dataset(dataset_path) -> list:
    """Parse a ROCStories CSV into (story, cont1, cont2, label) tuples.

    Columns 1-4 are joined into the story, columns 5 and 6 are the two
    candidate continuations, and the last column (1-based answer index) is
    shifted to a 0-based label.

    Renamed from the mangled ``SCREAMING_SNAKE_CASE``: main() below calls this
    helper as ``load_rocstories_dataset``. The original also passed the path
    string (instead of the open file) to csv.reader/next/tqdm and appended to
    an undefined ``output``.
    """
    with open(dataset_path, encoding="utf_8") as f:
        reader = csv.reader(f)
        output = []
        next(reader)  # skip the header line
        for line in tqdm(reader):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Tensorize ROCStories examples for OpenAIGPTDoubleHeadsModel.

    Each dataset is a list of (story, cont1, cont2, mc_label) token-id lists.
    For every example two candidate sequences are built:

        [start_token] story[:cap_length] [delimiter_token] cont[:cap_length] [clf_token]

    Renamed from the mangled ``SCREAMING_SNAKE_CASE``: main() below calls this
    helper as ``pre_process_datasets``. The original collapsed every local into
    one name, so the arrays were never filled and the body read undefined
    names (and ``np.intaa`` does not exist).

    Args:
        encoded_datasets: iterable of datasets of 4-tuples as described above.
        input_len: padded sequence length of the output tensors.
        cap_length: maximum number of story/continuation tokens kept.
        start_token, delimiter_token, clf_token: special token ids.

    Returns:
        One tuple per dataset of torch tensors
        (input_ids, mc_token_ids, lm_labels, mc_labels) with shapes
        (n, 2, input_len), (n, 2), (n, 2, input_len), (n,).
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        # -100 is the ignore index for the LM loss; only real tokens get labels.
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            # The multiple-choice head reads the hidden state at the [clf] token.
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def SCREAMING_SNAKE_CASE ( ) -> Any:
    """CLI entry point: fine-tune OpenAI GPT (double-heads) on ROCStories and/or evaluate it.

    NOTE(review): identifiers in this body look machine-mangled — results are
    assigned to ``UpperCamelCase__`` while later statements read names such as
    ``parser``, ``args``, ``tokenizer``, ``model``, ``loss``, etc.  As written
    the function raises NameError at the first such read; the mangled
    assignment targets need restoring before this script can run.  The
    comments below describe the evident intent only.
    """
    UpperCamelCase__ : Tuple = argparse.ArgumentParser()
    parser.add_argument("--model_name" , type=__lowerCAmelCase , default="openai-gpt" , help="pretrained model name" )
    parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
    parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." )
    parser.add_argument(
        "--output_dir" , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help="The output directory where the model predictions and checkpoints will be written." , )
    parser.add_argument("--train_dataset" , type=__lowerCAmelCase , default="" )
    parser.add_argument("--eval_dataset" , type=__lowerCAmelCase , default="" )
    parser.add_argument("--seed" , type=__lowerCAmelCase , default=42 )
    parser.add_argument("--num_train_epochs" , type=__lowerCAmelCase , default=3 )
    parser.add_argument("--train_batch_size" , type=__lowerCAmelCase , default=8 )
    parser.add_argument("--eval_batch_size" , type=__lowerCAmelCase , default=16 )
    parser.add_argument("--adam_epsilon" , default=1E-8 , type=__lowerCAmelCase , help="Epsilon for Adam optimizer." )
    parser.add_argument("--max_grad_norm" , type=__lowerCAmelCase , default=1 )
    parser.add_argument(
        "--max_steps" , default=-1 , type=__lowerCAmelCase , help=(
            "If > 0: set total number of training steps to perform. Override num_train_epochs."
        ) , )
    parser.add_argument(
        "--gradient_accumulation_steps" , type=__lowerCAmelCase , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
    parser.add_argument("--learning_rate" , type=__lowerCAmelCase , default=6.2_5E-5 )
    parser.add_argument("--warmup_steps" , default=0 , type=__lowerCAmelCase , help="Linear warmup over warmup_steps." )
    parser.add_argument("--lr_schedule" , type=__lowerCAmelCase , default="warmup_linear" )
    parser.add_argument("--weight_decay" , type=__lowerCAmelCase , default=0.0_1 )
    parser.add_argument("--lm_coef" , type=__lowerCAmelCase , default=0.9 )
    parser.add_argument("--n_valid" , type=__lowerCAmelCase , default=374 )
    parser.add_argument("--server_ip" , type=__lowerCAmelCase , default="" , help="Can be used for distant debugging." )
    parser.add_argument("--server_port" , type=__lowerCAmelCase , default="" , help="Can be used for distant debugging." )
    UpperCamelCase__ : Union[str, Any] = parser.parse_args()
    print(__lowerCAmelCase )
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach" )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowerCAmelCase )
        ptvsd.wait_for_attach()
    # Seed every RNG so runs are reproducible.
    random.seed(args.seed )
    np.random.seed(args.seed )
    torch.manual_seed(args.seed )
    torch.cuda.manual_seed_all(args.seed )
    UpperCamelCase__ : Optional[Any] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
    UpperCamelCase__ : Dict = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(__lowerCAmelCase , __lowerCAmelCase ) )
    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True." )
    if not os.path.exists(args.output_dir ):
        os.makedirs(args.output_dir )
    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    UpperCamelCase__ : List[str] = ["_start_", "_delimiter_", "_classify_"]
    UpperCamelCase__ : str = OpenAIGPTTokenizer.from_pretrained(args.model_name )
    tokenizer.add_tokens(__lowerCAmelCase )
    UpperCamelCase__ : Tuple = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
    UpperCamelCase__ : Tuple = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
    model.resize_token_embeddings(len(__lowerCAmelCase ) )
    model.to(__lowerCAmelCase )
    # Load and encode the datasets
    def tokenize_and_encode(__lowerCAmelCase ):
        # Recursively tokenize strings, pass ints through, and map over containers.
        if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(__lowerCAmelCase ) )
        elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
            return obj
        return [tokenize_and_encode(__lowerCAmelCase ) for o in obj]

    logger.info("Encoding dataset..." )
    UpperCamelCase__ : Any = load_rocstories_dataset(args.train_dataset )
    UpperCamelCase__ : Optional[int] = load_rocstories_dataset(args.eval_dataset )
    UpperCamelCase__ : List[str] = (train_dataset, eval_dataset)
    UpperCamelCase__ : Union[str, Any] = tokenize_and_encode(__lowerCAmelCase )
    # Compute the max input length for the Transformer
    UpperCamelCase__ : Dict = model.config.n_positions // 2 - 2
    UpperCamelCase__ : Tuple = max(
        len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, conta, conta, _ in dataset )
    UpperCamelCase__ : int = min(__lowerCAmelCase , model.config.n_positions ) # Max size of input for the pre-trained model
    # Prepare inputs tensors and dataloaders
    UpperCamelCase__ : Union[str, Any] = pre_process_datasets(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , *__lowerCAmelCase )
    UpperCamelCase__ , UpperCamelCase__ : int = tensor_datasets[0], tensor_datasets[1]
    UpperCamelCase__ : int = TensorDataset(*__lowerCAmelCase )
    UpperCamelCase__ : Optional[Any] = RandomSampler(__lowerCAmelCase )
    UpperCamelCase__ : List[Any] = DataLoader(__lowerCAmelCase , sampler=__lowerCAmelCase , batch_size=args.train_batch_size )
    UpperCamelCase__ : List[str] = TensorDataset(*__lowerCAmelCase )
    UpperCamelCase__ : str = SequentialSampler(__lowerCAmelCase )
    UpperCamelCase__ : Optional[Any] = DataLoader(__lowerCAmelCase , sampler=__lowerCAmelCase , batch_size=args.eval_batch_size )
    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            UpperCamelCase__ : Tuple = args.max_steps
            UpperCamelCase__ : Tuple = args.max_steps // (len(__lowerCAmelCase ) // args.gradient_accumulation_steps) + 1
        else:
            UpperCamelCase__ : Union[str, Any] = len(__lowerCAmelCase ) // args.gradient_accumulation_steps * args.num_train_epochs
        UpperCamelCase__ : List[str] = list(model.named_parameters() )
        # Parameters in `no_decay` are excluded from weight decay.
        UpperCamelCase__ : Optional[Any] = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        UpperCamelCase__ : List[str] = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
        ]
        UpperCamelCase__ : str = AdamW(__lowerCAmelCase , lr=args.learning_rate , eps=args.adam_epsilon )
        UpperCamelCase__ : List[Any] = get_linear_schedule_with_warmup(
            __lowerCAmelCase , num_warmup_steps=args.warmup_steps , num_training_steps=__lowerCAmelCase )
    if args.do_train:
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Tuple = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ):
            UpperCamelCase__ : int = 0
            UpperCamelCase__ : Optional[int] = 0
            UpperCamelCase__ : Optional[int] = tqdm(__lowerCAmelCase , desc="Training" )
            for step, batch in enumerate(__lowerCAmelCase ):
                UpperCamelCase__ : Tuple = tuple(t.to(__lowerCAmelCase ) for t in batch )
                UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : str = batch
                UpperCamelCase__ : Any = model(__lowerCAmelCase , mc_token_ids=__lowerCAmelCase , lm_labels=__lowerCAmelCase , mc_labels=__lowerCAmelCase )
                # Combined objective: weighted LM loss plus multiple-choice loss.
                UpperCamelCase__ : str = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                UpperCamelCase__ : List[str] = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                UpperCamelCase__ : Any = "Training loss: {:.2e} lr: {:.2e}".format(__lowerCAmelCase , scheduler.get_lr()[0] )
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        UpperCamelCase__ : Any = model.module if hasattr(__lowerCAmelCase , "module" ) else model # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        UpperCamelCase__ : Any = os.path.join(args.output_dir , __lowerCAmelCase )
        UpperCamelCase__ : List[Any] = os.path.join(args.output_dir , __lowerCAmelCase )
        torch.save(model_to_save.state_dict() , __lowerCAmelCase )
        model_to_save.config.to_json_file(__lowerCAmelCase )
        tokenizer.save_vocabulary(args.output_dir )
        # Load a trained model and vocabulary that you have fine-tuned
        UpperCamelCase__ : Tuple = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
        UpperCamelCase__ : Optional[int] = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
        model.to(__lowerCAmelCase )
    if args.do_eval:
        model.eval()
        UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = 0, 0
        UpperCamelCase__ , UpperCamelCase__ : str = 0, 0
        for batch in tqdm(__lowerCAmelCase , desc="Evaluating" ):
            UpperCamelCase__ : Optional[int] = tuple(t.to(__lowerCAmelCase ) for t in batch )
            UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Any = batch
            with torch.no_grad():
                UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = model(
                    __lowerCAmelCase , mc_token_ids=__lowerCAmelCase , lm_labels=__lowerCAmelCase , mc_labels=__lowerCAmelCase )
            UpperCamelCase__ : List[str] = mc_logits.detach().cpu().numpy()
            UpperCamelCase__ : List[Any] = mc_labels.to("cpu" ).numpy()
            UpperCamelCase__ : Optional[Any] = accuracy(__lowerCAmelCase , __lowerCAmelCase )
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0 )
            nb_eval_steps += 1
        UpperCamelCase__ : Any = eval_loss / nb_eval_steps
        UpperCamelCase__ : List[Any] = eval_accuracy / nb_eval_examples
        UpperCamelCase__ : List[str] = tr_loss / nb_tr_steps if args.do_train else None
        UpperCamelCase__ : Optional[Any] = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
        UpperCamelCase__ : int = os.path.join(args.output_dir , "eval_results.txt" )
        with open(__lowerCAmelCase , "w" ) as writer:
            logger.info("***** Eval results *****" )
            for key in sorted(result.keys() ):
                logger.info("  %s = %s" , __lowerCAmelCase , str(result[key] ) )
                writer.write("%s = %s\n" % (key, str(result[key] )) )
if __name__ == "__main__":
    # Fix: the original line read `main() | 228 |` — the trailing text is a
    # stray dataset-row separator that made the file unparseable, and `main`
    # is never defined in this module.  At this point of module execution the
    # name SCREAMING_SNAKE_CASE is bound to the training entry point defined
    # above (originally named `main`).
    SCREAMING_SNAKE_CASE()
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __a ( ConfigTester ):
    """Levit-specific ConfigTester subclass.

    Fixes: the original base class ``A__`` is undefined in this module
    (ConfigTester is what the file imports), and the hasattr checks were made
    against the undefined name ``SCREAMING_SNAKE_CASE`` instead of the config
    instance built on the first line.  The override is named
    ``create_and_test_config_common_properties`` because ConfigTester invokes
    that hook by name from ``run_common_tests``.
    """

    def create_and_test_config_common_properties(self):
        """Instantiate the config and assert the Levit-specific fields exist."""
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester:
    """Builds small Levit configs/inputs and runs shape checks for the test suite.

    Fixes to the mangled original: the class is instantiated as
    ``LevitModelTester(self)`` by the test case below, so that name is
    load-bearing; ``__init__`` declared every parameter as
    ``SCREAMING_SNAKE_CASE`` (a SyntaxError) and assigned to a local instead
    of ``self``; and all methods shared the name ``__lowercase`` although the
    test case calls ``prepare_config_and_inputs`` / ``create_and_check_model``
    / ``create_and_check_for_image_classification`` /
    ``prepare_config_and_inputs_for_common``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[1_28, 2_56, 3_84],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.0_2,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        # The list defaults are kept from the original; they are never mutated here.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a tiny random batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a LevitConfig mirroring this tester's hyper-parameters."""
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Run LevitModel and check the last_hidden_state shape."""
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        # Apply the conv output-size formula once per embedding convolution.
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Run LevitForImageClassification and check the logits shape."""
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape ModelTesterMixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __a ( A__ , A__ , unittest.TestCase ):
_lowerCAmelCase : str = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
_lowerCAmelCase : List[str] = (
{
'''feature-extraction''': LevitModel,
'''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
_lowerCAmelCase : Optional[Any] = False
_lowerCAmelCase : Optional[Any] = False
_lowerCAmelCase : Any = False
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : List[Any] = False
def __lowercase ( self : Any ):
'''simple docstring'''
UpperCamelCase__ : List[Any] = LevitModelTester(self )
UpperCamelCase__ : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowercase ( self : str ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowercase ( self : List[str] ):
'''simple docstring'''
return
@unittest.skip(reason="Levit does not use inputs_embeds" )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="Levit does not support input and output embeddings" )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason="Levit does not output attentions" )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : str ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : int = [*signature.parameters.keys()]
UpperCamelCase__ : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
def check_hidden_states_output(SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] ):
UpperCamelCase__ : Dict = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
UpperCamelCase__ : int = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ : int = outputs.hidden_states
UpperCamelCase__ : List[Any] = len(self.model_tester.depths ) + 1
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = (self.model_tester.image_size, self.model_tester.image_size)
UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = image_size[0], image_size[1]
for _ in range(4 ):
UpperCamelCase__ : int = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
UpperCamelCase__ : Any = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ : List[Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : Tuple , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any]=False ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __lowercase ( self : Any ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
def __lowercase ( self : List[str] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCamelCase__ , UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ : Optional[int] = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(SCREAMING_SNAKE_CASE )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
UpperCamelCase__ : Dict = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.train()
UpperCamelCase__ : int = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __lowercase ( self : int ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCamelCase__ : Union[str, Any] = False
UpperCamelCase__ : Dict = True
for model_class in self.all_model_classes:
if model_class in get_values(SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
UpperCamelCase__ : int = model_class(SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.to(SCREAMING_SNAKE_CASE )
model.train()
UpperCamelCase__ : Tuple = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = model(**SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __lowercase ( self : int ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ : Optional[int] = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(SCREAMING_SNAKE_CASE ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}' ):
UpperCamelCase__ : Optional[int] = problem_type["title"]
UpperCamelCase__ : Tuple = problem_type["num_labels"]
UpperCamelCase__ : Tuple = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.train()
UpperCamelCase__ : Dict = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
if problem_type["num_labels"] > 1:
UpperCamelCase__ : Optional[int] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
UpperCamelCase__ : Tuple = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=SCREAMING_SNAKE_CASE ) as warning_list:
UpperCamelCase__ : Any = model(**SCREAMING_SNAKE_CASE ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'Something is going wrong in the regression problem: intercepted {w.message}' )
loss.backward()
@slow
def __lowercase ( self : Dict ):
    """Smoke-test that each published LeViT checkpoint (first entry only, for
    speed) can be loaded with ``LevitModel.from_pretrained``."""
    for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        # Fix: the original passed the undefined name SCREAMING_SNAKE_CASE to
        # from_pretrained/assertIsNotNone (a NameError at runtime); the loop
        # variable `model_name` is the intended checkpoint identifier.
        model = LevitModel.from_pretrained(model_name )
        self.assertIsNotNone(model )
def prepare_img ( ) -> Optional[int]:
    """Load the standard COCO cats test-fixture image used by the slow
    integration tests below.

    Fix: the function was obfuscated to ``SCREAMING_SNAKE_CASE`` while the
    integration test calls ``prepare_img()`` — restoring the called name so
    the reference resolves.
    """
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
@require_torch
@require_vision
class __a ( unittest.TestCase ):
    """Slow integration tests for the published LeViT image-classification
    checkpoint."""

    @cached_property
    def default_image_processor ( self : Optional[Any] ):
        """Image processor built from the first published LeViT checkpoint.

        Fix: renamed from the obfuscated ``__lowercase`` — the test below
        reads ``self.default_image_processor``, and the original name also
        collided with the test method, clobbering this property entirely.
        """
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )

    @slow
    def __lowercase ( self : Dict ):
        """Forward a fixture image through LevitForImageClassificationWithTeacher
        and compare the logits' shape and first values against reference numbers.

        Fix: removed the dataset-junk tokens (``| 228 | 1 |``) that were fused
        onto the final assertion line and made the file unparseable.
        """
        # NOTE(review): SCREAMING_SNAKE_CASE below is undefined in this chunk;
        # it presumably stands for `torch_device` / the processed inputs /
        # the expected tensor — confirm against the original test module.
        UpperCamelCase__ : Tuple = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : Tuple = self.default_image_processor
        UpperCamelCase__ : List[str] = prepare_img()
        UpperCamelCase__ : Any = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
        # forward pass
        with torch.no_grad():
            UpperCamelCase__ : List[Any] = model(**SCREAMING_SNAKE_CASE )
        # verify the logits
        UpperCamelCase__ : Tuple = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : Tuple = torch.tensor([1.0_4_4_8, -0.3_7_4_5, -1.8_3_1_7] ).to(SCREAMING_SNAKE_CASE )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Scheduler test-suite for ``DEISMultistepScheduler``.

    NOTE(review): this class is machine-obfuscated — every method is named
    ``A__`` (so later definitions clobber earlier ones), both class attributes
    share the name ``lowercase_``, and locals are assigned to ``lowercase``
    but read back under their original names (``config``, ``kwargs``,
    ``sample`` …), which would raise ``NameError`` at runtime. The comments
    below describe the evident intent; confirm against the original
    diffusers test module before restoring names.
    """

    # Scheduler classes under test / default forward kwargs (names clobber
    # each other in the obfuscated form — presumably `scheduler_classes` and
    # `forward_default_kwargs` originally).
    lowercase_ : Dict =(DEISMultistepScheduler,)
    lowercase_ : Dict =(('''num_inference_steps''', 25),)

    def A__ ( self ,**A__):
        """Build a default scheduler config dict, overridden by kwargs."""
        lowercase = {
            '''num_train_timesteps''': 1_0_0_0,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''solver_order''': 2,
        }
        # NOTE(review): `config` is read here but the dict above was bound to
        # `lowercase` — obfuscation artifact.
        config.update(**A__)
        return config

    def A__ ( self ,A__=0 ,**A__):
        """Save a configured scheduler, reload it, and check both produce
        identical step outputs (timesteps set before saving)."""
        lowercase = dict(self.forward_default_kwargs)
        lowercase = kwargs.pop('''num_inference_steps''' ,A__)
        lowercase = self.dummy_sample
        lowercase = 0.1 * sample
        lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            lowercase = self.get_scheduler_config(**A__)
            lowercase = scheduler_class(**A__)
            scheduler.set_timesteps(A__)
            # copy over dummy past residuals
            lowercase = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(A__)
                lowercase = scheduler_class.from_pretrained(A__)
                new_scheduler.set_timesteps(A__)
                # copy over dummy past residuals
                lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
                lowercase , lowercase = sample, sample
                for t in range(A__ ,time_step + scheduler.config.solver_order + 1):
                    lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
                    lowercase = new_scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
                    assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def A__ ( self):
        """Intentionally disabled base-class check (no-op)."""
        pass

    def A__ ( self ,A__=0 ,**A__):
        """Save/reload round-trip variant where timesteps are set only after
        reloading from the saved config."""
        lowercase = dict(self.forward_default_kwargs)
        lowercase = kwargs.pop('''num_inference_steps''' ,A__)
        lowercase = self.dummy_sample
        lowercase = 0.1 * sample
        lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            lowercase = self.get_scheduler_config()
            lowercase = scheduler_class(**A__)
            scheduler.set_timesteps(A__)
            # copy over dummy past residuals (must be after setting timesteps)
            lowercase = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(A__)
                lowercase = scheduler_class.from_pretrained(A__)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(A__)
                # copy over dummy past residual (must be after setting timesteps)
                lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
                lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
                lowercase = new_scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
                assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def A__ ( self ,A__=None ,**A__):
        """Run a full 10-step denoising loop with the dummy model and return
        the final sample."""
        if scheduler is None:
            lowercase = self.scheduler_classes[0]
            lowercase = self.get_scheduler_config(**A__)
            lowercase = scheduler_class(**A__)
        lowercase = self.scheduler_classes[0]
        lowercase = self.get_scheduler_config(**A__)
        lowercase = scheduler_class(**A__)
        lowercase = 1_0
        lowercase = self.dummy_model()
        lowercase = self.dummy_sample_deter
        scheduler.set_timesteps(A__)
        for i, t in enumerate(scheduler.timesteps):
            lowercase = model(A__ ,A__)
            lowercase = scheduler.step(A__ ,A__ ,A__).prev_sample
        return sample

    def A__ ( self):
        """Check that two consecutive scheduler steps preserve the sample shape."""
        lowercase = dict(self.forward_default_kwargs)
        lowercase = kwargs.pop('''num_inference_steps''' ,A__)
        for scheduler_class in self.scheduler_classes:
            lowercase = self.get_scheduler_config()
            lowercase = scheduler_class(**A__)
            lowercase = self.dummy_sample
            lowercase = 0.1 * sample
            if num_inference_steps is not None and hasattr(A__ ,'''set_timesteps'''):
                scheduler.set_timesteps(A__)
            elif num_inference_steps is not None and not hasattr(A__ ,'''set_timesteps'''):
                lowercase = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
            lowercase = dummy_past_residuals[: scheduler.config.solver_order]
            lowercase = scheduler.timesteps[5]
            lowercase = scheduler.timesteps[6]
            lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
            lowercase = scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
            self.assertEqual(output_a.shape ,sample.shape)
            self.assertEqual(output_a.shape ,output_a.shape)

    def A__ ( self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        lowercase = DEISMultistepScheduler(**self.get_scheduler_config())
        lowercase = self.full_loop(scheduler=A__)
        lowercase = torch.mean(torch.abs(A__))
        assert abs(result_mean.item() - 0.23916) < 1E-3
        # Round-trip the config through the sibling multistep schedulers and
        # back to DEIS; the result must be unchanged.
        lowercase = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        lowercase = DPMSolverMultistepScheduler.from_config(scheduler.config)
        lowercase = UniPCMultistepScheduler.from_config(scheduler.config)
        lowercase = DEISMultistepScheduler.from_config(scheduler.config)
        lowercase = self.full_loop(scheduler=A__)
        lowercase = torch.mean(torch.abs(A__))
        assert abs(result_mean.item() - 0.23916) < 1E-3

    def A__ ( self):
        """Sweep num_train_timesteps values through check_over_configs."""
        for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=A__)

    def A__ ( self):
        """Sweep thresholding / solver / prediction-type combinations."""
        # NOTE(review): `thresholding=A__` passes the (undefined) obfuscated
        # name; originally this was a boolean (True/False) sweep — confirm.
        self.check_over_configs(thresholding=A__)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=A__ ,prediction_type=A__ ,sample_max_value=A__ ,algorithm_type='''deis''' ,solver_order=A__ ,solver_type=A__ ,)

    def A__ ( self):
        """Sweep the supported prediction types."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=A__)

    def A__ ( self):
        """Sweep all DEIS algorithm/solver/order/prediction combinations and
        check the full loop produces finite samples."""
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=A__ ,solver_type=A__ ,prediction_type=A__ ,algorithm_type=A__ ,)
                        lowercase = self.full_loop(
                            solver_order=A__ ,solver_type=A__ ,prediction_type=A__ ,algorithm_type=A__ ,)
                        assert not torch.isnan(A__).any(), "Samples have nan numbers"

    def A__ ( self):
        """Check both lower_order_final settings."""
        # NOTE(review): originally True then False; both calls now pass the
        # same obfuscated name — confirm against the original module.
        self.check_over_configs(lower_order_final=A__)
        self.check_over_configs(lower_order_final=A__)

    def A__ ( self):
        """Sweep num_inference_steps values through check_over_forward."""
        for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_forward(num_inference_steps=A__ ,time_step=0)

    def A__ ( self):
        """Full loop with defaults must reproduce the reference mean."""
        lowercase = self.full_loop()
        lowercase = torch.mean(torch.abs(A__))
        assert abs(result_mean.item() - 0.23916) < 1E-3

    def A__ ( self):
        """Full loop with v-prediction must reproduce its reference mean."""
        lowercase = self.full_loop(prediction_type='''v_prediction''')
        lowercase = torch.mean(torch.abs(A__))
        assert abs(result_mean.item() - 0.091) < 1E-3

    def A__ ( self):
        """Half-precision input must stay fp16 through the full loop."""
        lowercase = self.scheduler_classes[0]
        lowercase = self.get_scheduler_config(thresholding=A__ ,dynamic_thresholding_ratio=0)
        lowercase = scheduler_class(**A__)
        lowercase = 1_0
        lowercase = self.dummy_model()
        lowercase = self.dummy_sample_deter.half()
        scheduler.set_timesteps(A__)
        for i, t in enumerate(scheduler.timesteps):
            lowercase = model(A__ ,A__)
            lowercase = scheduler.step(A__ ,A__ ,A__).prev_sample
        # `torch.floataa` is the obfuscated spelling of torch.float16 here.
        assert sample.dtype == torch.floataa
# (removed dataset row-separator residue: "| 633 |")
# Backward-compatibility shim: the standalone `inpainting.py` community script
# was folded into diffusers proper. This module only re-exports the pipeline
# (the `as` alias keeps the name importable; noqa silences the unused-import
# lint) and warns callers to import it from `diffusers` directly.
import warnings

from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401


warnings.warn(
    "The `inpainting.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionInpaintPipeline` instead."
)
# (removed dataset row-separator residue: "| 633 | 1 |")
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _a ( UpperCamelCase__ , unittest.TestCase ):
    """Test-suite for the BERT tokenizers (slow Python and fast Rust variants).

    NOTE(review): the source is machine-obfuscated — every test method shares
    the name ``lowerCamelCase_``, so under normal Python semantics each later
    definition clobbers the previous one and unittest would discover none of
    them (no ``test_`` prefix). The docstrings below record the evident intent
    of each method; confirm names against the original transformers test file.
    """

    _lowercase : Optional[Any] = BertTokenizer
    _lowercase : int = BertTokenizerFast
    _lowercase : Tuple = True
    _lowercase : Union[str, Any] = True
    _lowercase : Tuple = filter_non_english

    def lowerCamelCase_ ( self: List[Any] ) -> int:
        """Write a tiny WordPiece vocabulary file into the test temp dir."""
        super().setUp()
        lowercase__ = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Optional[int] ) -> Dict:
        """Return a (raw input, expected decoded output) text pair."""
        lowercase__ = '''UNwant\u00E9d,running'''
        lowercase__ = '''unwanted, running'''
        return input_text, output_text

    def lowerCamelCase_ ( self: Tuple ) -> Optional[int]:
        """Tokenize with the slow tokenizer and check tokens and ids."""
        lowercase__ = self.tokenizer_class(self.vocab_file )
        lowercase__ = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(UpperCamelCase_ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [9, 6, 7, 12, 10, 11] )

    def lowerCamelCase_ ( self: Tuple ) -> List[str]:
        """Check the Python and Rust tokenizers agree, with and without
        lower-casing."""
        if not self.test_rust_tokenizer:
            return
        lowercase__ = self.get_tokenizer()
        lowercase__ = self.get_rust_tokenizer()
        lowercase__ = '''UNwant\u00E9d,running'''
        lowercase__ = tokenizer.tokenize(UpperCamelCase_ )
        lowercase__ = rust_tokenizer.tokenize(UpperCamelCase_ )
        self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
        lowercase__ = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
        lowercase__ = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
        self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
        lowercase__ = self.get_rust_tokenizer()
        lowercase__ = tokenizer.encode(UpperCamelCase_ )
        lowercase__ = rust_tokenizer.encode(UpperCamelCase_ )
        self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
        # With lower casing
        lowercase__ = self.get_tokenizer(do_lower_case=UpperCamelCase_ )
        lowercase__ = self.get_rust_tokenizer(do_lower_case=UpperCamelCase_ )
        lowercase__ = '''UNwant\u00E9d,running'''
        lowercase__ = tokenizer.tokenize(UpperCamelCase_ )
        lowercase__ = rust_tokenizer.tokenize(UpperCamelCase_ )
        self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
        lowercase__ = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
        lowercase__ = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
        self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
        lowercase__ = self.get_rust_tokenizer()
        lowercase__ = tokenizer.encode(UpperCamelCase_ )
        lowercase__ = rust_tokenizer.encode(UpperCamelCase_ )
        self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )

    def lowerCamelCase_ ( self: Dict ) -> List[Any]:
        """BasicTokenizer must split CJK characters individually."""
        lowercase__ = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )

    def lowerCamelCase_ ( self: List[Any] ) -> Tuple:
        """BasicTokenizer lower-cases and, by default, strips accents."""
        lowercase__ = BasicTokenizer(do_lower_case=UpperCamelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )

    def lowerCamelCase_ ( self: List[Any] ) -> Dict:
        """Lower-casing with strip_accents disabled keeps the accents."""
        lowercase__ = BasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )

    def lowerCamelCase_ ( self: Any ) -> List[str]:
        """Lower-casing with strip_accents enabled removes the accents."""
        lowercase__ = BasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )

    def lowerCamelCase_ ( self: List[Any] ) -> Optional[Any]:
        """Default lower-casing behaviour strips accents."""
        lowercase__ = BasicTokenizer(do_lower_case=UpperCamelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )

    def lowerCamelCase_ ( self: int ) -> Union[str, Any]:
        """Without lower-casing, original casing is preserved."""
        lowercase__ = BasicTokenizer(do_lower_case=UpperCamelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )

    def lowerCamelCase_ ( self: str ) -> Any:
        """Cased tokenization with strip_accents disabled keeps accents."""
        lowercase__ = BasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )

    def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]:
        """Cased tokenization with strip_accents enabled removes accents."""
        lowercase__ = BasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )

    def lowerCamelCase_ ( self: int ) -> Tuple:
        """Tokens listed in never_split must pass through untouched."""
        lowercase__ = BasicTokenizer(do_lower_case=UpperCamelCase_ , never_split=['''[UNK]'''] )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )

    def lowerCamelCase_ ( self: Optional[int] ) -> Union[str, Any]:
        """Punctuation is split into individual tokens."""
        lowercase__ = BasicTokenizer()
        lowercase__ = '''a\n\'ll !!to?\'d of, can\'t.'''
        lowercase__ = ['''a''', '''\'''', '''ll''', '''!''', '''!''', '''to''', '''?''', '''\'''', '''d''', '''of''', ''',''', '''can''', '''\'''', '''t''', '''.''']
        self.assertListEqual(tokenizer.tokenize(UpperCamelCase_ ) , UpperCamelCase_ )

    def lowerCamelCase_ ( self: Dict ) -> List[Any]:
        """WordpieceTokenizer: greedy longest-match-first with [UNK] fallback."""
        lowercase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
        lowercase__ = {}
        for i, token in enumerate(UpperCamelCase_ ):
            lowercase__ = i
        lowercase__ = WordpieceTokenizer(vocab=UpperCamelCase_ , unk_token='''[UNK]''' )
        self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
        self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )

    def lowerCamelCase_ ( self: str ) -> int:
        """Unit-check the _is_whitespace character classifier."""
        self.assertTrue(_is_whitespace(''' ''' ) )
        self.assertTrue(_is_whitespace('''\t''' ) )
        self.assertTrue(_is_whitespace('''\r''' ) )
        self.assertTrue(_is_whitespace('''\n''' ) )
        self.assertTrue(_is_whitespace('''\u00A0''' ) )
        self.assertFalse(_is_whitespace('''A''' ) )
        self.assertFalse(_is_whitespace('''-''' ) )

    def lowerCamelCase_ ( self: str ) -> Optional[int]:
        """Unit-check the _is_control character classifier."""
        self.assertTrue(_is_control('''\u0005''' ) )
        self.assertFalse(_is_control('''A''' ) )
        self.assertFalse(_is_control(''' ''' ) )
        self.assertFalse(_is_control('''\t''' ) )
        self.assertFalse(_is_control('''\r''' ) )

    def lowerCamelCase_ ( self: Optional[Any] ) -> str:
        """Unit-check the _is_punctuation character classifier."""
        self.assertTrue(_is_punctuation('''-''' ) )
        self.assertTrue(_is_punctuation('''$''' ) )
        self.assertTrue(_is_punctuation('''`''' ) )
        self.assertTrue(_is_punctuation('''.''' ) )
        self.assertFalse(_is_punctuation('''A''' ) )
        self.assertFalse(_is_punctuation(''' ''' ) )

    def lowerCamelCase_ ( self: Dict ) -> Dict:
        """Soft-hyphen-only input must tokenize to an empty list."""
        lowercase__ = self.get_tokenizer()
        lowercase__ = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(UpperCamelCase_ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(UpperCamelCase_ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )

    @slow
    def lowerCamelCase_ ( self: Union[str, Any] ) -> str:
        """build_inputs_with_special_tokens wraps with [CLS]/[SEP] (101/102)."""
        lowercase__ = self.tokenizer_class.from_pretrained('''bert-base-uncased''' )
        lowercase__ = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCamelCase_ )
        lowercase__ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCamelCase_ )
        lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
        lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]

    def lowerCamelCase_ ( self: List[Any] ) -> Union[str, Any]:
        """Offset mappings from the fast tokenizer must align with the tokens,
        in both cased and uncased configurations."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                lowercase__ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
                lowercase__ = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
                lowercase__ = tokenizer_r.encode_plus(
                    UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , )
                lowercase__ = tokenizer_r.do_lower_case if hasattr(UpperCamelCase_ , '''do_lower_case''' ) else False
                # Expected (offset, token) pairs, one list per casing mode.
                lowercase__ = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), '''A'''),
                        ((1, 2), ''','''),
                        ((3, 5), '''na'''),
                        ((5, 6), '''##ï'''),
                        ((6, 8), '''##ve'''),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), '''Allen'''),
                        ((21, 23), '''##NL'''),
                        ((23, 24), '''##P'''),
                        ((25, 33), '''sentence'''),
                        ((33, 34), '''.'''),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), '''a'''),
                        ((1, 2), ''','''),
                        ((3, 8), '''naive'''),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), '''allen'''),
                        ((21, 23), '''##nl'''),
                        ((23, 24), '''##p'''),
                        ((25, 33), '''sentence'''),
                        ((33, 34), '''.'''),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
                self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )

    def lowerCamelCase_ ( self: Any ) -> Any:
        """With tokenize_chinese_chars on, each CJK character is a standalone
        token (no '##' continuation); with it off, continuations appear."""
        lowercase__ = ['''的''', '''人''', '''有''']
        lowercase__ = ''''''.join(UpperCamelCase_ )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                lowercase__ = True
                lowercase__ = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
                lowercase__ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
                lowercase__ = tokenizer_p.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
                lowercase__ = tokenizer_r.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
                lowercase__ = tokenizer_r.convert_ids_to_tokens(UpperCamelCase_ )
                lowercase__ = tokenizer_p.convert_ids_to_tokens(UpperCamelCase_ )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
                self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
                lowercase__ = False
                lowercase__ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
                lowercase__ = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
                lowercase__ = tokenizer_r.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
                lowercase__ = tokenizer_p.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
                lowercase__ = tokenizer_r.convert_ids_to_tokens(UpperCamelCase_ )
                lowercase__ = tokenizer_p.convert_ids_to_tokens(UpperCamelCase_ )
                # it is expected that only the first Chinese character is not preceded by "##".
                lowercase__ = [
                    f'##{token}' if idx != 0 else token for idx, token in enumerate(UpperCamelCase_ )
                ]
                self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
                self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
"""Lazy import structure for the Conditional DETR model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Fixes applied to the obfuscated original:
# * `_LazyModule` was handed `_import_structure`, which was never defined —
#   everything had been renamed to a repeatedly-clobbered variable `A`.
# * The vision/torch-gated names were assigned to throwaway variables instead
#   of being registered in the import structure.
# * The lazy proxy must replace this module in `sys.modules`; the result was
#   previously just bound to a local.
# * Dataset-residue tokens fused onto the first/last lines were removed.
_import_structure = {
    'configuration_conditional_detr': [
        'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'ConditionalDetrConfig',
        'ConditionalDetrOnnxConfig',
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_conditional_detr'] = ['ConditionalDetrFeatureExtractor']
    _import_structure['image_processing_conditional_detr'] = ['ConditionalDetrImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_conditional_detr'] = [
        'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ConditionalDetrForObjectDetection',
        'ConditionalDetrForSegmentation',
        'ConditionalDetrModel',
        'ConditionalDetrPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real symbols; at runtime they are lazy.
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on
    # first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE_ : List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ : int = {
'''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class _A ( PretrainedConfig ):
    """Configuration for the DETA detection model.

    Fixes applied to the obfuscated original:
    * ``__init__`` declared the same parameter name 37 times (a SyntaxError);
      the canonical DETA parameter names were restored — the positional
      defaults are unchanged, so existing keyword callers keep working.
    * The base class ``__a`` and the duplicated ``__a`` class attributes were
      restored to ``PretrainedConfig`` (imported above), ``model_type`` and
      ``attribute_map``.
    * ``isinstance(x, x)`` in the dict branch was restored to
      ``isinstance(backbone_config, dict)``.
    """

    model_type = 'deta'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }

    def __init__(
        self,
        backbone_config=None,          # nested backbone config (dict or config object); ResNet by default
        num_queries=900,               # number of object queries
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ) -> None:
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
        elif isinstance(backbone_config, dict):
            # Rebuild a typed backbone config from a plain dict.
            backbone_model_type = backbone_config.pop("model_type" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True." )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs )

    @property
    def _lowerCamelCase ( self ) -> int:
        """Alias exposing the encoder head count under a generic name."""
        return self.encoder_attention_heads

    @property
    def _lowerCamelCase ( self ) -> int:
        """Alias exposing the model width under a generic name."""
        return self.d_model

    def _lowerCamelCase ( self ) -> str:
        """Serialize this config (including the nested backbone config) to a dict."""
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
# (removed dataset row-separator residue: "| 274 |")
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ : Dict = logging.get_logger(__name__)
class _A ( PretrainedConfig ):
    """Composite configuration wrapping an encoder config and a decoder config.

    Fixes applied to the obfuscated original:
    * The undefined base class ``__a`` was restored to ``PretrainedConfig``
      (imported above), and the duplicated ``__a`` class attributes to
      ``model_type`` / ``is_composition``.
    * All three methods shared the name ``_lowerCamelCase`` (later defs
      clobbered earlier ones) and the classmethod declared the same parameter
      name twice (a SyntaxError); the canonical names were restored.
    """

    model_type = 'encoder-decoder'
    is_composition = True

    def __init__( self , **kwargs ) -> None:
        """Build from ``encoder=...`` and ``decoder=...`` config dicts."""
        super().__init__(**kwargs )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder" )
        encoder_model_type = encoder_config.pop("model_type" )
        decoder_config = kwargs.pop("decoder" )
        decoder_model_type = decoder_config.pop("model_type" )

        # Imported lazily to avoid a circular import with the auto module.
        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs ( cls , encoder_config , decoder_config , **kwargs ) -> PretrainedConfig:
        """Compose a config from two existing configs, forcing the decoder
        into decoder mode with cross-attention enabled."""
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )

    def to_dict ( self ) -> dict:
        """Serialize to a dict, expanding the nested encoder/decoder configs."""
        output = copy.deepcopy(self.__dict__ )
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
# (removed dataset row-separator residue: "| 274 | 1 |")
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , _UpperCamelCase = "▁" , _UpperCamelCase = True , _UpperCamelCase = "<unk>" , _UpperCamelCase = "</s>" , _UpperCamelCase = "<pad>" , ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = {
'pad': {'id': 0, 'token': pad_token},
'eos': {'id': 1, 'token': eos_token},
'unk': {'id': 2, 'token': unk_token},
}
UpperCAmelCase_ : Any = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
UpperCAmelCase_ : int = token_dict['token']
UpperCAmelCase_ : List[str] = Tokenizer(Unigram() )
UpperCAmelCase_ : Union[str, Any] = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(' {2,}' ) , ' ' ),
normalizers.Lowercase(),
] )
UpperCAmelCase_ : Any = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ ),
pre_tokenizers.Digits(individual_digits=UpperCAmelCase_ ),
pre_tokenizers.Punctuation(),
] )
UpperCAmelCase_ : Any = decoders.Metaspace(replacement=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ )
UpperCAmelCase_ : str = TemplateProcessing(
single=f"$A {self.special_tokens['eos']['token']}" , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
UpperCAmelCase_ : Optional[int] = {
'model': 'SentencePieceUnigram',
'replacement': replacement,
'add_prefix_space': add_prefix_space,
}
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = 8_0_0_0 , _UpperCamelCase = True , ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = trainers.UnigramTrainer(
vocab_size=UpperCAmelCase_ , special_tokens=self.special_tokens_list , show_progress=UpperCAmelCase_ , )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase_ : List[str] = [files]
self._tokenizer.train(UpperCAmelCase_ , trainer=UpperCAmelCase_ )
self.add_unk_id()
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = 8_0_0_0 , _UpperCamelCase = True , ) -> int:
UpperCAmelCase_ : List[str] = trainers.UnigramTrainer(
vocab_size=UpperCAmelCase_ , special_tokens=self.special_tokens_list , show_progress=UpperCAmelCase_ , )
self._tokenizer.train_from_iterator(UpperCAmelCase_ , trainer=UpperCAmelCase_ )
self.add_unk_id()
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ : int = json.loads(self._tokenizer.to_str() )
UpperCAmelCase_ : str = self.special_tokens['unk']['id']
UpperCAmelCase_ : Optional[Any] = Tokenizer.from_str(json.dumps(UpperCAmelCase_ ) )
# (removed dataset row-separator residue: "| 406 |")
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
__A : List[str] = logging.get_logger(__name__)
__A : Optional[Any] = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class __A(PretrainedConfig):
    """Configuration for DPT (Dense Prediction Transformer) models.

    Covers both the plain ViT-based DPT and the `DPT-hybrid` variant, which
    embeds patches with a BiT backbone.

    NOTE(review): the class was presumably named ``DPTConfig`` before the
    identifiers were mangled; the original name ``__A`` is kept here so any
    external references keep working.
    """

    # Required by `to_dict` below and by the HF config registry.
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        # NOTE(review): the mangled signature repeated one parameter name for
        # every argument (a SyntaxError); parameter names were reconstructed
        # from the attribute assignments in the body.
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info('Initializing the config with a `BiT` backbone.')
                backbone_config = {
                    'global_padding': 'same',
                    'layer_type': 'bottleneck',
                    'depths': [3, 4, 9],
                    'out_features': ['stage1', 'stage2', 'stage3'],
                    'embedding_dynamic_padding': True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info('Initializing the config with a `BiT` backbone.')
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}." )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            # Hybrid mode only supports the "project" readout.
            if readout_type != "project":
                raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.')
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']')
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        """Serialize this config to a plain dict, nesting the backbone config."""
        output = copy.deepcopy(self.__dict__)

        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()

        output["model_type"] = self.__class__.model_type
        return output
| 343 | 0 |
def mf_knapsack(i, wt, val, j):
    """Memoized 0/1 knapsack: best value using the first *i* items with capacity *j*.

    Args:
        i: Number of items considered (1-based prefix of ``wt``/``val``).
        wt: Item weights.
        val: Item values.
        j: Remaining knapsack capacity.

    Returns:
        The maximum achievable value, cached in the global table ``f``
        (row 0 must be zeros, all other cells initialized to -1).
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:  # -1 marks "not computed yet"
        if j < wt[i - 1]:
            # Item i does not fit: skip it.
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            # Either skip item i, or take it and recurse on the reduced capacity.
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = best
    return f[i][j]
def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack.

    Args:
        w: Knapsack capacity.
        wt: Item weights (length >= n).
        val: Item values (length >= n).
        n: Number of items to consider.

    Returns:
        A tuple ``(optimal_value, dp)`` where ``dp[i][c]`` is the best value
        using the first ``i`` items with capacity ``c``.
    """
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                # Take item i (value + best for the remaining capacity) or skip it.
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    # Index with w directly (the original relied on the leftover loop variable,
    # which is undefined when w == 0).
    return dp[n][w], dp
def knapsack_with_example_solution(w, wt, val):
    """Solve 0/1 knapsack and also report one optimal item subset.

    Args:
        w: Knapsack capacity.
        wt: Item weights (list or tuple of ints).
        val: Item values (list or tuple, same length as ``wt``).

    Returns:
        ``(optimal_value, optimal_subset)`` where the subset contains
        1-based item indices.

    Raises:
        ValueError: If the inputs are not sequences or their lengths differ.
        TypeError: If any weight is not an integer.
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            '''Both the weights and values vectors must be either lists or tuples''' )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            '''The number of weights must be the same as the number of values.\n'''
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                '''All weights must be integers but got weight of '''
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set
def UpperCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ):
"""simple docstring"""
if i > 0 and j > 0:
if dp[i - 1][j] == dp[i][j]:
_construct_solution(lowercase , lowercase , i - 1 , lowercase , lowercase )
else:
optimal_set.add(lowercase )
_construct_solution(lowercase , lowercase , i - 1 , j - wt[i - 1] , lowercase )
if __name__ == "__main__":
    # Demo / self-test for the three knapsack implementations above.
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    # Memo table for mf_knapsack: row 0 is zeros, remaining cells start at -1.
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _example_optional_set = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> Any:
    """Shuffle *data* in place via random pair swaps and return it.

    NOTE(review): despite the name this is not the textbook Fisher-Yates
    algorithm (which swaps position i with a random j <= i); it performs
    len(data) random transpositions, which is not a perfectly uniform shuffle.

    Args:
        data: The list to shuffle (mutated in place).

    Returns:
        The same list object, shuffled.
    """
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        # The mangled original assigned the swap to throwaway locals,
        # so the list was never actually modified — fixed here.
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    # Small demo: shuffle a list of ints and a list of strings.
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
import requests
from bs4 import BeautifulSoup  # fixed: module was typo'd as "bsa"
def get_citation(base_url: str, params: dict) -> str:
    """Scrape the citation count/link text from a Google Scholar lookup page.

    Args:
        base_url: The Scholar lookup endpoint.
        params: Query parameters identifying the publication.

    Returns:
        The text of the third anchor in the result footer — on Scholar result
        pages this is the "Cited by N" link.
    """
    soup = BeautifulSoup(requests.get(base_url, params=params).content, 'html.parser')
    # The first result block; NOTE(review): raises AttributeError if Scholar
    # returns no results (find() -> None) — confirm desired behavior.
    div = soup.find('div', attrs={'class': 'gs_ri'})
    anchors = div.find('div', attrs={'class': 'gs_fl'}).find_all('a')
    return anchors[2].get_text()
if __name__ == "__main__":
    # Example lookup parameters for a specific publication.
    params = {
        '''title''': (
            '''Precisely geometry controlled microsupercapacitors for ultrahigh areal '''
            '''capacitance, volumetric capacitance, and energy density'''
        ),
        '''journal''': '''Chem. Mater.''',
        '''volume''': 30,
        '''pages''': '''3979-3990''',
        '''year''': 2018,
        '''hl''': '''en''',
    }
    print(get_citation('''https://scholar.google.com/scholar_lookup''', params=params))
| 84 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_SCREAMING_SNAKE_CASE = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
_SCREAMING_SNAKE_CASE = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> 
print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
_SCREAMING_SNAKE_CASE = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowerCAmelCase_(datasets.Metric):
    """Exact-match metric: percentage of predictions identical to their references.

    NOTE(review): the class name was mangled (presumably ``ExactMatch``); it is
    kept as-is to avoid breaking external references. The two methods below
    were both mangled to the same name, so the second silently clobbered the
    first — they are restored to the ``_info``/``_compute`` names required by
    the ``datasets.Metric`` API.
    """

    def _info(self):
        """Describe the metric's inputs and metadata for the datasets library."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        """Return ``{"exact_match": rate}`` with rate in [0.0, 100.0].

        Optional normalizations (regex removal, lowercasing, punctuation and
        digit stripping) are applied to both predictions and references
        before comparison.
        """
        if regexes_to_ignore is not None:
            # Strip every ignored pattern from both sides before comparing.
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repmap = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repmap)
            references = np.char.translate(references, table=repmap)

        if ignore_numbers:
            repmap = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repmap)
            references = np.char.translate(references, table=repmap)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 18 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    """Agent tool that answers a question about a document image using Donut.

    The class name matches the error message emitted in ``__init__``; the
    mangled source used a placeholder name and an undefined base class —
    ``PipelineTool`` (imported above) is the only base that provides
    ``pre_processor``/``model``/``device`` used below.
    """

    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # Donut needs PIL to handle the input image.
        if not is_vision_available():
            raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''')
        super().__init__(*args, **kwargs)

    def encode(self, document, question):
        """Build pixel values and the Donut DocVQA prompt for *question*."""
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace('''{user_input}''', question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors='''pt''' ).input_ids
        pixel_values = self.pre_processor(document, return_tensors='''pt''').pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        """Run constrained greedy generation and return the raw sequences."""
        return self.model.generate(
            inputs['''pixel_values'''].to(self.device),
            decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        """Strip special tokens, parse the Donut output, and return the answer text."""
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, '''''')
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, '''''')
        sequence = re.sub(r'''<.*?>''', '''''', sequence, count=1).strip()  # remove first task start token
        # Fixed: the mangled source called a non-existent `tokenajson`;
        # the Donut processor API is `token2json`.
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
| 706 |
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law to compute the one quantity given as 0.

    Exactly one of the three arguments must be 0; it is solved for from the
    other two.

    Args:
        voltage: Voltage in volts (0 if unknown).
        current: Current in amperes (0 if unknown).
        resistance: Resistance in ohms (0 if unknown); must be non-negative.

    Returns:
        A one-entry dict naming the solved quantity, e.g. ``{"current": 2.0}``.

    Raises:
        ValueError: If not exactly one argument is 0, or resistance < 0.
    """
    # NOTE(review): the original signature repeated one parameter name three
    # times (a SyntaxError) and annotated the return as `str`; both fixed.
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError('''One and only one argument must be 0''')
    if resistance < 0:
        raise ValueError('''Resistance cannot be negative''')
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError('''Exactly one argument must be 0''')
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 297 | 0 |
"""simple docstring"""
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Return all primes below *limit* using an odd-only Eratosthenes sieve.

    Args:
        limit: Exclusive upper bound (must be > 2).

    Returns:
        The primes below ``limit`` in ascending order.
    """
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):  # only odd candidates
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            # Fixed: the mangled source appended the *limit* here instead of i.
            primes.append(i)

    return primes
def solution(ceiling: int = 1_000_000) -> int:
    """Project Euler 50: prime below *ceiling* writable as the longest sum of consecutive primes.

    Args:
        ceiling: Exclusive upper bound for the target prime.

    Returns:
        The prime below ``ceiling`` that is the sum of the most consecutive primes.
    """
    primes = prime_sieve(ceiling)
    prime_set = set(primes)  # O(1) membership instead of O(n) list scans
    length = 0
    largest = 0

    for i in range(len(primes)):
        # Only consider runs at least as long as the best found so far.
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in prime_set:
                length = j - i
                largest = sol

    return largest
# Print the Project Euler #50 answer when run as a script.
if __name__ == "__main__":
    print(F"""{solution() = }""")
| 218 |
"""Check repository file paths for problematic names.

Reports paths containing uppercase letters, spaces, or hyphens, and files
sitting outside any directory; exits with the offender count as the status.
"""
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 218 | 1 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
    """Builds tiny ConvNextV2 configs and inputs for the unit tests below.

    Renamed from the mangled placeholder: the test class instantiates it as
    ``ConvNextVaModelTester(self)``, and the placeholder name collided with
    the test class's own mangled name.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        # NOTE(review): the mangled signature duplicated one parameter name for
        # every argument; names reconstructed from the assignments below.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for one forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) without labels."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        """Return (config, inputs_dict) including labels, for training tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Unit tests for ConvNextV2 model classes.

    The mangled source gave every test method the same name (so only the last
    one survived) and this class the same name as the tester above; method
    names are restored to the ``test_*`` names unittest requires.
    """

    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            # Base models and backbones have no loss head to train.
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration test.

    Named ``prepare_img`` because the integration test below calls it by that
    name; the mangled source defined it under a placeholder.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the pretrained convnextv2-tiny checkpoint."""

    @cached_property
    def default_image_processor(self):
        # Referenced by name in the test below; returns None when vision
        # extras are unavailable (the @require_vision guard normally skips).
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
| 720 |
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative ints as a '0b'-prefixed string.

    Both operands are rendered in binary, zero-padded to equal width, and
    ANDed character by character.

    Args:
        a: First non-negative integer.
        b: Second non-negative integer.

    Returns:
        A string such as ``"0b001"``, padded to the wider operand's width.

    Raises:
        ValueError: If either input is negative.
    """
    # NOTE(review): the original declared the same mangled name for both
    # parameters (a SyntaxError); names restored from the body's intent.
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 455 | 0 |
from __future__ import annotations
# Grid moves used by the search routine below, which reads this constant as
# DIRECTIONS (the mangled source bound it to a placeholder name instead).
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str =[
[0 for col in range(len(grid[0] ) )] for row in range(len(_lowerCAmelCase ) )
] # the reference grid
SCREAMING_SNAKE_CASE : Optional[int] =1
SCREAMING_SNAKE_CASE : int =[
[0 for col in range(len(grid[0] ) )] for row in range(len(_lowerCAmelCase ) )
] # the action grid
SCREAMING_SNAKE_CASE : Optional[Any] =init[0]
SCREAMING_SNAKE_CASE : Tuple =init[1]
SCREAMING_SNAKE_CASE : List[Any] =0
SCREAMING_SNAKE_CASE : Tuple =g + heuristic[x][y] # cost from starting cell to destination cell
SCREAMING_SNAKE_CASE : List[str] =[[f, g, x, y]]
SCREAMING_SNAKE_CASE : Optional[int] =False # flag that is set when search is complete
SCREAMING_SNAKE_CASE : Optional[int] =False # flag set if we can't find expand
while not found and not resign:
if len(_lowerCAmelCase ) == 0:
raise ValueError('''Algorithm is unable to find solution''' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
SCREAMING_SNAKE_CASE : Optional[Any] =cell.pop()
SCREAMING_SNAKE_CASE : str =next_cell[2]
SCREAMING_SNAKE_CASE : List[str] =next_cell[3]
SCREAMING_SNAKE_CASE : int =next_cell[1]
if x == goal[0] and y == goal[1]:
SCREAMING_SNAKE_CASE : Union[str, Any] =True
else:
for i in range(len(_lowerCAmelCase ) ): # to try out different valid actions
SCREAMING_SNAKE_CASE : Dict =x + DIRECTIONS[i][0]
SCREAMING_SNAKE_CASE : Tuple =y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(_lowerCAmelCase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
SCREAMING_SNAKE_CASE : int =g + cost
SCREAMING_SNAKE_CASE : List[str] =ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
SCREAMING_SNAKE_CASE : List[Any] =1
SCREAMING_SNAKE_CASE : Optional[int] =i
SCREAMING_SNAKE_CASE : Union[str, Any] =[]
SCREAMING_SNAKE_CASE : Optional[Any] =goal[0]
SCREAMING_SNAKE_CASE : Tuple =goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
SCREAMING_SNAKE_CASE : int =x - DIRECTIONS[action[x][y]][0]
SCREAMING_SNAKE_CASE : Any =y - DIRECTIONS[action[x][y]][1]
SCREAMING_SNAKE_CASE : int =xa
SCREAMING_SNAKE_CASE : List[Any] =ya
invpath.append([x, y] )
SCREAMING_SNAKE_CASE : List[Any] =[]
for i in range(len(_lowerCAmelCase ) ):
path.append(invpath[len(_lowerCAmelCase ) - 1 - i] )
return path, action
if __name__ == "__main__":
    # Demo grid: 0 are free path whereas 1's are obstacles.
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
| 258 | """simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    """Build the HF ``XCLIPConfig`` matching an original X-CLIP checkpoint name.

    Args:
        model_name: checkpoint identifier, e.g. ``"xclip-base-patch32"``.
        num_frames: number of video frames the checkpoint was trained on.

    Returns:
        A fully populated ``XCLIPConfig``.
    """
    # The obfuscated original named both parameters `_lowerCAmelCase`
    # (a SyntaxError) and collided with three other functions named `a_`.
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        # "large" variants scale up both towers and the MIT temporal encoder.
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12

        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    """Map an original X-CLIP checkpoint parameter name to its HF Transformers name.

    Args:
        name: original state-dict key.

    Returns:
        The renamed key; unknown keys are returned unchanged.
    """
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def convert_state_dict(orig_state_dict, config):
    """Convert an original X-CLIP state dict in place to HF Transformers layout.

    Fused ``attn.in_proj`` weights/biases are split into separate q/k/v
    projections; all other keys are passed through :func:`rename_key`.

    Args:
        orig_state_dict: the original checkpoint state dict (mutated and returned).
        config: the target ``XCLIPConfig`` (used for the hidden sizes).

    Returns:
        The converted state dict.
    """
    # The obfuscated original named both parameters `_lowerCAmelCase` (a
    # SyntaxError) and lost every assignment-target key; the f-string keys
    # below are reconstructed from the reference conversion script.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    # cross-frame "message" attention of the vision tower
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            # Projection matrices are stored transposed in the original checkpoint.
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    """Download the sample "eating spaghetti" video as a list of frames.

    Args:
        num_frames: number of frames to load; must be 8, 16 or 32.

    Returns:
        A list of numpy frame arrays.

    Raises:
        ValueError: if ``num_frames`` is not one of the supported counts
            (the original fell through to an UnboundLocalError instead).
    """
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    else:
        raise ValueError(f"num_frames must be 8, 16 or 32, got {num_frames}")
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset",
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original X-CLIP checkpoint to the HF Transformers format.

    Downloads the original weights, renames/splits them, sanity-checks the
    converted model on a sample video and optionally saves/pushes the result.

    Args:
        model_name: one of the supported checkpoint names below.
        pytorch_dump_folder_path: optional output directory for ``save_pretrained``.
        push_to_hub: whether to push model, processor and slow tokenizer to the hub.
    """
    # The obfuscated original declared all three parameters as
    # `_lowerCAmelCase` (a SyntaxError); real names are restored.
    model_to_url = {
        # fully supervised kinetics-400 checkpoints
        "xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
        "xclip-base-patch32-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
        ),
        "xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
        "xclip-base-patch16-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
        ),
        "xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
        "xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
        # fully supervised kinetics-600 checkpoints
        "xclip-base-patch16-kinetics-600": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
        ),
        "xclip-base-patch16-kinetics-600-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
        ),
        "xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
        # few shot
        "xclip-base-patch16-hmdb-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
        ),
        "xclip-base-patch16-hmdb-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
        ),
        "xclip-base-patch16-hmdb-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
        ),
        "xclip-base-patch16-hmdb-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
        ),
        "xclip-base-patch16-ucf-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
        ),
        "xclip-base-patch16-ucf-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
        ),
        "xclip-base-patch16-ucf-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
        ),
        "xclip-base-patch16-ucf-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
        ),
        # zero shot
        "xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
    }

    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        # Google Drive links need gdown; GitHub release links work with torch.hub.
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    # position_ids buffers are recreated by the HF model, so they may be missing.
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs against probabilities recorded from the original models.
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"""Model name {model_name} not supported""")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="xclip-base-patch32",
        type=str,
        help="Name of the model.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    # The original bound the parsed namespace to `_UpperCamelCase` but then
    # read `args`, which was undefined; the name is fixed here.
    args = parser.parse_args()
    convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 599 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU checks for ``UnCLIPImageVariationPipeline`` built from tiny dummy sub-models.

    The obfuscated original collapsed every method name to ``A_`` (each def
    shadowing the previous one), used an undefined base class and duplicate
    ``__lowercase`` parameters; the real names are restored here. The class
    itself is renamed because a second class later in the file reused the
    same obfuscated name.
    """

    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            num_hidden_layers=5,
            num_attention_heads=4,
            image_size=32,
            intermediate_size=37,
            patch_size=1,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_text_proj(self):
        torch.manual_seed(0)
        model_kwargs = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }
        model = UnCLIPTextProjModel(**model_kwargs)
        return model

    @property
    def dummy_decoder(self):
        torch.manual_seed(0)
        model_kwargs = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_super_res_kwargs(self):
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def dummy_super_res_first(self):
        torch.manual_seed(0)
        model = UNetaDModel(**self.dummy_super_res_kwargs)
        return model

    @property
    def dummy_super_res_last(self):
        # seeded differently from the first model on purpose
        torch.manual_seed(1)
        model = UNetaDModel(**self.dummy_super_res_kwargs)
        return model

    def get_dummy_components(self):
        """Assemble the full set of tiny components the pipeline needs."""
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last

        decoder_scheduler = UnCLIPScheduler(
            variance_type="learned_range",
            prediction_type="epsilon",
            num_train_timesteps=1_000,
        )

        super_res_scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="epsilon",
            num_train_timesteps=1_000,
        )

        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        image_encoder = self.dummy_image_encoder

        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        """Build deterministic pipeline inputs; as a tensor or a PIL image."""
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }

    def test_unclip_image_variation_input_tensor(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.0002,
                0.9997,
                0.9997,
                0.9969,
                0.0023,
                0.9997,
                0.9969,
                0.9970,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_image(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_list_images(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        pipeline_inputs["image"] = [
            pipeline_inputs["image"],
            pipeline_inputs["image"],
        ]

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        tuple_pipeline_inputs["image"] = [
            tuple_pipeline_inputs["image"],
            tuple_pipeline_inputs["image"],
        ]

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (2, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.9989,
                0.0008,
                0.0021,
                0.9960,
                0.0018,
                0.0014,
                0.0002,
                0.9933,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_passed_image_embed(self):
        """Passing precomputed image embeddings must match passing the image."""
        device = torch.device("cpu")

        class DummyScheduler:
            init_noise_sigma = 1

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(0)
        dtype = pipe.decoder.dtype
        batch_size = 1

        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        img_out_a = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents
        ).images

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop("image")
        image_embeddings = pipe.image_encoder(image).image_embeds

        img_out_a_embed = pipe(
            **pipeline_inputs,
            decoder_latents=decoder_latents,
            super_res_latents=super_res_latents,
            image_embeddings=image_embeddings,
        ).images

        # make sure passing text embeddings manually is identical
        assert np.abs(img_out_a - img_out_a_embed).max() < 1e-4

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1e-2

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, expected_max_diff=expected_max_diff
        )

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
        )

    def test_inference_batch_consistent(self):
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes,
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
            )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs
            )

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test against the pretrained Karlo image-variation checkpoint.

    Renamed from the obfuscated ``snake_case`` (which collided with the fast
    test class earlier in the file); method names are restored from ``A_``.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy"
        )

        # NOTE(review): `torch.floataa` in the obfuscated source is the mangled
        # spelling of `torch.float16` (the expected file is the *_fp16 output).
        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(
            input_image,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        assert_mean_pixel_difference(image, expected_image, 15)
"""simple docstring"""
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def lowerCamelCase_(checkpoint_path, config_path, output_path) -> None:
    """Convert an original (CompVis-style) unconditional LDM checkpoint into a
    diffusers ``LDMPipeline`` saved at ``output_path``.

    Args:
        checkpoint_path: path to the original ``.ckpt`` file; the checkpoint is
            expected to store its weights under the ``'model'`` key.
        config_path: path to the OmegaConf YAML config describing the model.
        config keys used: ``model.params.{first_stage_config,unet_config,
            timesteps,linear_start,linear_end}``.
        output_path: directory where the converted pipeline is written.
    """
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE (keys prefixed with 'first_stage_model.')
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM (keys prefixed with 'model.diffusion_model.')
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    # `lowerCamelCase_` is the conversion entry point defined above.
    lowerCamelCase_(args.checkpoint_path, args.config_path, args.output_path)
def a(A__) -> str:
    """Encode a bytes-like payload as an uppercase Base16 (hex) string,
    per RFC 4648 (formerly 3548) section 8/6.

    Args:
        A__: the bytes-like object to encode.

    Returns:
        Two uppercase hex digits per input byte.
    """
    # Each individual byte (not the whole payload) is hex-formatted;
    # zfill(2) keeps the leading zero for values below 0x10.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(A__)])
def a(A__) -> bytes:
    """Decode an uppercase Base16 (hex) string into bytes.

    Args:
        A__: the Base16 string to decode.

    Returns:
        The decoded bytes.

    Raises:
        ValueError: if the input has an odd number of digits, or contains
            characters outside the uppercase hex alphabet 0-9/A-F.
    """
    if (len(A__) % 2) != 0:
        raise ValueError(
            '''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(A__) <= set("0123456789ABCDEF"):
        raise ValueError(
            '''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(A__[i] + A__[i + 1], 16) for i in range(0, len(A__), 2))
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 101 | 0 |
"""simple docstring"""
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
_lowercase : Optional[int] = Mapping[str, np.ndarray]
_lowercase : Dict = Mapping[str, Any] # Is a nested dict.
_lowercase : Union[str, Any] = 0.01
@dataclasses.dataclass(frozen=_lowerCAmelCase )
class _UpperCAmelCase :
a__ : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
a__ : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
a__ : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
a__ : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
a__ : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
a__ : Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
a__ : Optional[str] = None
# Templates used to generate this protein (prediction-only)
a__ : Optional[Sequence[str]] = None
# Chain corresponding to each parent
a__ : Optional[Sequence[int]] = None
def lowercase__(proteinnet_str: str) -> "Protein":
    """Parse a ProteinNet-format text record into a `Protein`.

    Only the N, CA and C backbone atom slots are populated; coordinates are
    scaled by PICO_TO_ANGSTROM.

    Args:
        proteinnet_str: a record containing [PRIMARY]/[TERTIARY]/[MASK]
            sections separated by `[TAG]` header lines.

    Returns:
        A `Protein` with 0-based contiguous residue indices and b_factors=None.
    """
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    # Pair every [TAG] header with the lines of its section body.
    groups = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            # Three lines: all x, then all y, then all z coordinates.
            tertiary = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros(
                (len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)
            ).astype(np.float32)
            # Coordinates are interleaved N, CA, C per residue.
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            # '-' marks a missing residue, '+' a present one.
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
def lowercase__(prot: "Protein", chain_id: int = 0):
    """Build the PDB header lines (REMARK/PARENT) for one chain of `prot`.

    Args:
        prot: protein whose `remark`, `parents` and `parents_chain_index`
            attributes are consulted.
        chain_id: index of the chain whose parents should be listed.

    Returns:
        A list of header strings (without trailing newlines).
    """
    pdb_headers = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        # Keep only the parents that belong to the requested chain.
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers


# Name used by the other serialization helpers in this module (e.g. to_pdb).
get_pdb_headers = lowercase__
def lowercase__(prot: "Protein", pdb_str: str) -> str:
    """Insert REMARK/PARENT headers from `prot` into an existing PDB string.

    Any PARENT/REMARK lines already present in `pdb_str` are dropped and
    regenerated; a PARENT record is emitted for each chain (after every TER
    record that is not immediately followed by END).

    Args:
        prot: protein providing `remark`, `parents` and `parents_chain_index`.
        pdb_str: the PDB text to annotate.

    Returns:
        The annotated PDB text.
    """
    out_pdb_lines = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    # parents_per_chain[c] lists the parent templates of chain number c.
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            # Group the parents by their chain index (keys are stringified ints).
            parent_dict = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: "Sequence[str]") -> str:
        # One PARENT record listing all parents of a single chain.
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            # A chain just ended; emit the PARENT record for the next chain.
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]
            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def lowercase__(prot: "Protein") -> str:
    """Convert a `Protein` into a PDB-format string.

    Emits the REMARK/PARENT headers, one ATOM record per present atom
    (mask >= 0.5), a TER record at the end of every chain, and a final END.

    Raises:
        ValueError: if any aatype index exceeds residue_constants.restype_num.
    """
    restypes = residue_constants.restypes + ["X"]

    def res_atoa(r: int) -> str:
        # Map a residue-type index to its three-letter code ('UNK' if unknown).
        return residue_constants.restype_atoa.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_a = res_atoa(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            # Four-character atom names fill the column; shorter ones are padded.
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_a:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_atoa(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def lowercase__(prot: "Protein") -> np.ndarray:
    """Return the sequence-derived (ideal) atom mask for `prot`.

    Unlike `Protein.atom_mask`, which marks the atoms actually present in the
    structure, this looks up the atoms implied by each residue type from
    residue_constants.STANDARD_ATOM_MASK.
    """
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def lowercase__(
    features: "FeatureDict",
    result: "ModelOutput",
    b_factors: "Optional[np.ndarray]" = None,
    chain_index: "Optional[np.ndarray]" = None,
    remark: "Optional[str]" = None,
    parents: "Optional[Sequence[str]]" = None,
    parents_chain_index: "Optional[Sequence[int]]" = None,
):
    """Assemble a `Protein` from model features and outputs.

    Args:
        features: input features; `aatype` and `residue_index` are read.
        result: model output; `final_atom_positions` and `final_atom_mask`
            are read.
        b_factors: optional per-atom B-factors; defaults to zeros shaped like
            the final atom mask.
        chain_index, remark, parents, parents_chain_index: stored verbatim on
            the returned `Protein`.

    Returns:
        A `Protein` instance.
    """
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        # Shift to 1-based residue numbering.
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
| 720 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _UpperCAmelCase :
@staticmethod
def a ( *_lowercase : List[Any] , **_lowercase : Optional[Any] ):
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
a__ : Optional[Any] = MODEL_FOR_OBJECT_DETECTION_MAPPING
def a ( self : Union[str, Any] , _lowercase : List[str] , _lowercase : List[Any] , _lowercase : List[str] ):
__UpperCAmelCase = ObjectDetectionPipeline(model=_lowercase , image_processor=_lowercase )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def a ( self : int , _lowercase : List[Any] , _lowercase : str ):
__UpperCAmelCase = object_detector('''./tests/fixtures/tests_samples/COCO/000000039769.png''' , threshold=0.0 )
self.assertGreater(len(_lowercase ) , 0 )
for detected_object in outputs:
self.assertEqual(
_lowercase , {
'''score''': ANY(_lowercase ),
'''label''': ANY(_lowercase ),
'''box''': {'''xmin''': ANY(_lowercase ), '''ymin''': ANY(_lowercase ), '''xmax''': ANY(_lowercase ), '''ymax''': ANY(_lowercase )},
} , )
import datasets
__UpperCAmelCase = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
__UpperCAmelCase = [
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
]
__UpperCAmelCase = object_detector(_lowercase , threshold=0.0 )
self.assertEqual(len(_lowercase ) , len(_lowercase ) )
for outputs in batch_outputs:
self.assertGreater(len(_lowercase ) , 0 )
for detected_object in outputs:
self.assertEqual(
_lowercase , {
'''score''': ANY(_lowercase ),
'''label''': ANY(_lowercase ),
'''box''': {'''xmin''': ANY(_lowercase ), '''ymin''': ANY(_lowercase ), '''xmax''': ANY(_lowercase ), '''ymax''': ANY(_lowercase )},
} , )
@require_tf
@unittest.skip('''Object detection not implemented in TF''' )
def a ( self : Dict ):
pass
@require_torch
def a ( self : Union[str, Any] ):
__UpperCAmelCase = '''hf-internal-testing/tiny-detr-mobilenetsv3'''
__UpperCAmelCase = AutoModelForObjectDetection.from_pretrained(_lowercase )
__UpperCAmelCase = AutoFeatureExtractor.from_pretrained(_lowercase )
__UpperCAmelCase = ObjectDetectionPipeline(model=_lowercase , feature_extractor=_lowercase )
__UpperCAmelCase = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=0.0 )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}},
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}},
] , )
__UpperCAmelCase = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
[
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}},
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}},
],
[
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}},
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}},
],
] , )
@require_torch
@slow
def a ( self : Union[str, Any] ):
__UpperCAmelCase = '''facebook/detr-resnet-50'''
__UpperCAmelCase = AutoModelForObjectDetection.from_pretrained(_lowercase )
__UpperCAmelCase = AutoFeatureExtractor.from_pretrained(_lowercase )
__UpperCAmelCase = ObjectDetectionPipeline(model=_lowercase , feature_extractor=_lowercase )
__UpperCAmelCase = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
] , )
__UpperCAmelCase = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
[
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
],
[
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
],
] , )
@require_torch
@slow
def a ( self : Dict ):
__UpperCAmelCase = '''facebook/detr-resnet-50'''
__UpperCAmelCase = pipeline('''object-detection''' , model=_lowercase )
__UpperCAmelCase = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
] , )
__UpperCAmelCase = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
[
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
],
[
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
],
] , )
@require_torch
@slow
def a ( self : Tuple ):
__UpperCAmelCase = 0.9_985
__UpperCAmelCase = '''facebook/detr-resnet-50'''
__UpperCAmelCase = pipeline('''object-detection''' , model=_lowercase )
__UpperCAmelCase = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=_lowercase )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
] , )
@require_torch
@require_pytesseract
@slow
def a ( self : List[str] ):
__UpperCAmelCase = '''Narsil/layoutlmv3-finetuned-funsd'''
__UpperCAmelCase = 0.9_993
__UpperCAmelCase = pipeline('''object-detection''' , model=_lowercase , threshold=_lowercase )
__UpperCAmelCase = object_detector(
'''https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png''' )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{'''score''': 0.9_993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 2_94, '''ymin''': 2_54, '''xmax''': 3_43, '''ymax''': 2_64}},
{'''score''': 0.9_993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 2_94, '''ymin''': 2_54, '''xmax''': 3_43, '''ymax''': 2_64}},
] , )
| 397 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class __snake_case ( __magic_name__ ):
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
class __snake_case ( __magic_name__ ):
def __init__( self , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_=512 , UpperCamelCase_="cls" , UpperCamelCase_=False , UpperCamelCase_=True , **UpperCamelCase_ , ) -> List[Any]:
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
snake_case__ = project_dim
snake_case__ = pooler_fn
snake_case__ = learn_encoder
snake_case__ = use_attention_mask
class __snake_case ( __magic_name__ ):
__lowerCAmelCase = [r'''pooler''', r'''logit_scale''']
__lowerCAmelCase = [r'''position_ids''', r'''predictions.decoder.bias''']
__lowerCAmelCase = '''roberta'''
__lowerCAmelCase = RobertaSeriesConfig
def __init__( self , UpperCamelCase_ ) -> List[Any]:
super().__init__(UpperCamelCase_ )
snake_case__ = XLMRobertaModel(UpperCamelCase_ )
snake_case__ = nn.Linear(config.hidden_size , config.project_dim )
snake_case__ = getattr(UpperCamelCase_ , 'has_pre_transformation' , UpperCamelCase_ )
if self.has_pre_transformation:
snake_case__ = nn.Linear(config.hidden_size , config.project_dim )
snake_case__ = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def _snake_case ( self , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , ) -> Optional[Any]:
snake_case__ = return_dict if return_dict is not None else self.config.use_return_dict
snake_case__ = self.base_model(
input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , output_attentions=UpperCamelCase_ , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=UpperCamelCase_ , )
if self.has_pre_transformation:
snake_case__ = outputs['hidden_states'][-2]
snake_case__ = self.pre_LN(UpperCamelCase_ )
snake_case__ = self.transformation_pre(UpperCamelCase_ )
return TransformationModelOutput(
projection_state=UpperCamelCase_ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
snake_case__ = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=UpperCamelCase_ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 368 |
'''simple docstring'''
import enum
import shutil
import sys
a__ , a__ : Any = shutil.get_terminal_size()
a__ : Optional[int] = {'''UP''': '''A''', '''DOWN''': '''B''', '''RIGHT''': '''C''', '''LEFT''': '''D'''}
class __snake_case ( enum.Enum ):
__lowerCAmelCase = 0
__lowerCAmelCase = 1
def __lowerCamelCase ( UpperCAmelCase_ , UpperCAmelCase_="" ) ->Optional[Any]:
sys.stdout.write(str(UpperCAmelCase_ ) + end )
sys.stdout.flush()
def __lowerCamelCase ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_="" ) ->List[str]:
forceWrite(f'''\u001b[{color}m{content}\u001b[0m''' , UpperCAmelCase_ )
def __lowerCamelCase ( ) ->Optional[Any]:
forceWrite('\r' )
def __lowerCamelCase ( UpperCAmelCase_ , UpperCAmelCase_ ) ->Any:
forceWrite(f'''\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}''' )
def __lowerCamelCase ( ) ->str:
forceWrite(' ' * TERMINAL_WIDTH )
reset_cursor()
def __lowerCamelCase ( ) ->Tuple:
reset_cursor()
forceWrite('-' * TERMINAL_WIDTH )
| 368 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCamelCase : Any = logging.get_logger(__name__)
class A__ ( A__ ):
"""simple docstring"""
_lowercase = ['pixel_values']
def __init__( self : int , lowerCamelCase__ : bool = True , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : float = None , lowerCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCamelCase__ : bool = True , lowerCamelCase__ : Union[int, float] = 1 / 255 , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , **lowerCamelCase__ : Tuple , ):
super().__init__(**lowerCamelCase__ )
a__ : Dict = size if size is not None else {"shortest_edge": 384}
a__ : Dict = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
a__ : Dict = do_resize
a__ : List[Any] = size
# Default value set here for backwards compatibility where the value in config is None
a__ : Optional[int] = crop_pct if crop_pct is not None else 224 / 256
a__ : Optional[int] = resample
a__ : int = do_rescale
a__ : Optional[Any] = rescale_factor
a__ : Union[str, Any] = do_normalize
a__ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a__ : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _UpperCamelCase( self : Tuple , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Dict[str, int] , lowerCamelCase__ : float , lowerCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : Union[str, Any] , ):
a__ : Tuple = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
a__ : Optional[int] = size["shortest_edge"]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
a__ : Union[str, Any] = int(shortest_edge / crop_pct )
a__ : int = get_resize_output_image_size(lowerCamelCase__ , size=lowerCamelCase__ , default_to_square=lowerCamelCase__ )
a__ : int = resize(image=lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=lowerCamelCase__ , size=(shortest_edge, shortest_edge) , data_format=lowerCamelCase__ , **lowerCamelCase__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
lowerCamelCase__ , size=(shortest_edge, shortest_edge) , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def rescale(
    self,
    image: np.ndarray,
    scale: Union[int, float],
    data_format: Optional[Union[str, ChannelDimension]] = None,
    **kwargs,
) -> np.ndarray:
    """Rescale pixel values by ``scale`` (e.g. 1/255 to map uint8 into [0, 1]).

    NOTE(review): parameter names were scrambled into duplicates in the
    original (a SyntaxError); restored from the ``self.rescale(image=...,
    scale=...)`` call in `preprocess`.
    """
    return rescale(image, scale=scale, data_format=data_format, **kwargs)
def normalize(
    self,
    image: np.ndarray,
    mean: Union[float, List[float]],
    std: Union[float, List[float]],
    data_format: Optional[Union[str, ChannelDimension]] = None,
    **kwargs,
) -> np.ndarray:
    """Normalize an image as ``(image - mean) / std``.

    NOTE(review): parameter names were scrambled into duplicates in the
    original (a SyntaxError); restored from the ``self.normalize(image=...,
    mean=..., std=...)`` call in `preprocess`.
    """
    return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
def preprocess(
    self,
    images: ImageInput,
    do_resize: bool = None,
    size: Dict[str, int] = None,
    crop_pct: float = None,
    resample: PILImageResampling = None,
    do_rescale: bool = None,
    rescale_factor: float = None,
    do_normalize: bool = None,
    image_mean: Optional[Union[float, List[float]]] = None,
    image_std: Optional[Union[float, List[float]]] = None,
    return_tensors: Optional[Union[str, TensorType]] = None,
    data_format: ChannelDimension = ChannelDimension.FIRST,
    **kwargs,
):
    """Preprocess a batch of images: optional resize (ConvNeXt crop_pct
    recipe), rescale and normalize, then pack into a `BatchFeature`.

    Every ``None`` argument falls back to the value stored on the processor
    instance at construction time.

    NOTE(review): the scrambled original gave every parameter the same name
    (a SyntaxError); identifiers reconstructed from the instance attributes
    read in the body — confirm against upstream.

    Raises:
        ValueError: On invalid image types or missing required arguments for
            the enabled transformations.
    """
    do_resize = do_resize if do_resize is not None else self.do_resize
    crop_pct = crop_pct if crop_pct is not None else self.crop_pct
    resample = resample if resample is not None else self.resample
    do_rescale = do_rescale if do_rescale is not None else self.do_rescale
    rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
    do_normalize = do_normalize if do_normalize is not None else self.do_normalize
    image_mean = image_mean if image_mean is not None else self.image_mean
    image_std = image_std if image_std is not None else self.image_std

    size = size if size is not None else self.size
    size = get_size_dict(size, default_to_square=False)

    images = make_list_of_images(images)

    if not valid_images(images):
        raise ValueError(
            "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
            "torch.Tensor, tf.Tensor or jax.ndarray."
        )

    # FIX(review): the original condition `do_resize and size is None or
    # resample is None` raised even when do_resize was False (operator
    # precedence); the parenthesized form below expresses the intent stated
    # by the error message.
    if do_resize and (size is None or resample is None):
        raise ValueError("Size and resample must be specified if do_resize is True.")
    if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
        raise ValueError("crop_pct must be specified if size < 384.")
    if do_rescale and rescale_factor is None:
        raise ValueError("Rescale factor must be specified if do_rescale is True.")
    if do_normalize and (image_mean is None or image_std is None):
        raise ValueError("Image mean and std must be specified if do_normalize is True.")

    # All transformations expect numpy arrays.
    images = [to_numpy_array(image) for image in images]

    if do_resize:
        images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
    if do_rescale:
        images = [self.rescale(image=image, scale=rescale_factor) for image in images]
    if do_normalize:
        images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

    images = [to_channel_dimension_format(image, data_format) for image in images]

    data = {"pixel_values": images}
    return BatchFeature(data=data, tensor_type=return_tensors)
| 151 |
def UpperCamelCase_(num: int) -> int:
    """Return the largest number obtainable by deleting exactly one digit of `num`.

    The sign is ignored (``abs``).  For a single-digit input the deletion
    leaves an empty string and ``int("")`` raises ValueError — this mirrors
    the original behavior.

    Args:
        num: The integer to strip one digit from.

    Returns:
        The maximum over all single-digit deletions.

    Raises:
        TypeError: If `num` is not an int.
    """
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    # One mutable copy of the digit list per deletable position.
    num_string = str(abs(num))
    num_transpositions = [list(num_string) for _ in range(len(num_string))]
    for index in range(len(num_string)):
        # Remove a different digit from each copy.
        num_transpositions[index].pop(index)
    # FIX(review): the scrambled original joined `list(num)` instead of each
    # transposition, and tested isinstance(num, num).
    return max(int("".join(transposition)) for transposition in num_transpositions)
if __name__ == "__main__":
    # Run this module's doctests (the function above currently declares none,
    # so this is effectively a no-op).
    __import__("""doctest""").testmod()
| 151 | 1 |
"""simple docstring"""
from __future__ import annotations

# NOTE(review): every module-level constant below was mangled to the single
# name SCREAMING_SNAKE_CASE_, so each assignment overwrites the previous one.
# The functions below reference `abc` and `reflector`, so these were
# originally distinct names (two type aliases, the alphabet, three default
# rotors, the reflector, six extra rotors) — the distinct names must be
# restored before this module can work.  TODO confirm against upstream.

# Type alias: rotor-position triple (presumably (int, int, int)).
SCREAMING_SNAKE_CASE_ = tuple[int, int, int]
# Type alias: rotor-selection triple (presumably three rotor strings).
SCREAMING_SNAKE_CASE_ = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
SCREAMING_SNAKE_CASE_ = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
# -------------------------- default selection --------------------------
# rotors --------------------------
SCREAMING_SNAKE_CASE_ = '''EGZWVONAHDCLFQMSIPJBYUKXTR'''
SCREAMING_SNAKE_CASE_ = '''FOBHMDKEXQNRAULPGSJVTYICZW'''
SCREAMING_SNAKE_CASE_ = '''ZJXESIUQLHAVRMDOYGTNFWPBKC'''
# reflector --------------------------
# Symmetric letter-pair mapping: each letter maps to its partner and back.
SCREAMING_SNAKE_CASE_ = {
    '''A''': '''N''',
    '''N''': '''A''',
    '''B''': '''O''',
    '''O''': '''B''',
    '''C''': '''P''',
    '''P''': '''C''',
    '''D''': '''Q''',
    '''Q''': '''D''',
    '''E''': '''R''',
    '''R''': '''E''',
    '''F''': '''S''',
    '''S''': '''F''',
    '''G''': '''T''',
    '''T''': '''G''',
    '''H''': '''U''',
    '''U''': '''H''',
    '''I''': '''V''',
    '''V''': '''I''',
    '''J''': '''W''',
    '''W''': '''J''',
    '''K''': '''X''',
    '''X''': '''K''',
    '''L''': '''Y''',
    '''Y''': '''L''',
    '''M''': '''Z''',
    '''Z''': '''M''',
}
# -------------------------- extra rotors --------------------------
SCREAMING_SNAKE_CASE_ = '''RMDJXFUWGISLHVTCQNKYPBEZOA'''
SCREAMING_SNAKE_CASE_ = '''SGLCPQWZHKXAREONTFBVIYJUDM'''
SCREAMING_SNAKE_CASE_ = '''HVSICLTYKQUBXDWAJZOMFGPREN'''
SCREAMING_SNAKE_CASE_ = '''RZWQHFMVDBKICJLNTUXAGYPSOE'''
SCREAMING_SNAKE_CASE_ = '''LFKIJODBEGAMQPXVUHYSTCZRWN'''
SCREAMING_SNAKE_CASE_ = '''KOAEGVDHXPQZMLFTYWJNBRCIUS'''
def _validator(rotpos, rotsel, pb):
    """Validate rotor selection and positions, and parse the plugboard string.

    NOTE(review): the scrambled original was defined under the name
    `lowercase` with three identically-named parameters (a SyntaxError) and
    unpacked all three rotor positions into a single variable, so only the
    last value was ever range-checked.  The name is restored to match the
    `_validator(...)` call site in `enigma`.

    Args:
        rotpos: Triple of 1-based rotor positions.
        rotsel: Triple of rotor strings (must be three unique rotors).
        pb: Plugboard configuration string.

    Returns:
        ``(rotpos, rotsel, pbdict)`` where ``pbdict`` is the parsed plugboard.

    Raises:
        Exception: If fewer than 3 unique rotors are supplied.
        ValueError: If any rotor position is outside 1..26.
    """
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid — each position checked separately.
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        # FIX(review): closing parenthesis added to the message.
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)
    return rotpos, rotsel, pbdict
def _plugboard(pbstring):
    """Parse a plugboard string like ``"AB CD"`` into a symmetric mapping.

    NOTE(review): renamed from the scrambled `lowercase` to match the
    `_plugboard(...)` call site in `_validator`.

    Args:
        pbstring: Pairs of letters, optionally separated by spaces.

    Returns:
        Dict mapping each letter of a pair to its partner (both directions).

    Raises:
        TypeError: If `pbstring` is not a string.
        Exception: On odd symbol count, unknown symbol, or duplicate symbol.
    """
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        # FIX(review): the original tested isinstance(pbstring, pbstring).
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)

    # FIX(review): the original called pbstring.replace(" ", "") and discarded
    # the result (strings are immutable), so spaces were never removed and the
    # parity check below rejected legitimate spaced input.  Strip separators
    # first, then check parity.
    pbstring = pbstring.replace(" ", "")

    if len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    if pbstring == "":
        return {}

    # Checks if all characters are unique and part of the alphabet.
    tmppbl = set()
    for symbol in pbstring:
        if symbol not in abc:
            msg = f"'{symbol}' not in list of symbols"
            raise Exception(msg)
        if symbol in tmppbl:
            msg = f"Duplicate symbol ({symbol})"
            raise Exception(msg)
        tmppbl.add(symbol)
    del tmppbl

    # Created the dictionary — each pair maps in both directions.
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb
def enigma(text, rotor_position, rotor_selection=(rotora, rotora, rotora), plugb=""):
    """Encipher/decipher `text` with an Enigma-style machine.

    Because of the reflector, the transformation is its own inverse: running
    the output through `enigma` with the same settings recovers the input.

    NOTE(review): the scrambled original had three identically-named
    parameters (a SyntaxError) and collapsed the three rotor positions and
    three rotors into single variables; distinct names are reconstructed here.
    The default `rotor_selection` still references `rotora` because the
    module's rotor constants were also name-mangled — restore three distinct
    rotor constants there and update this default.  Name restored to match
    the `enigma(...)` call in the __main__ block.

    Args:
        text: Message; lowercase letters are uppercased, other symbols pass through.
        rotor_position: Triple of 1-based rotor positions.
        rotor_selection: Triple of rotor strings.
        plugb: Plugboard configuration string.

    Returns:
        The transformed message as a string.
    """
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(rotor_position, rotor_selection, plugb.upper())

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor1, rotor2, rotor3 = rotor_selection
    # Shift to 0-based indices into the alphabet.
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor1[index % len(abc)]
            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor2[index % len(abc)]
            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor3[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors (reverse path)
            symbol = abc[rotor3.index(symbol) - rotorpos3]
            symbol = abc[rotor2.index(symbol) - rotorpos2]
            symbol = abc[rotor1.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions — odometer-style stepping.
            # NOTE(review): nesting reconstructed from the upstream algorithm;
            # the scrambled original collapsed all three counters into one.
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # Non-alphabet symbols pass through unchanged.
        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
    # NOTE(review): the variable names below were all mangled to
    # SCREAMING_SNAKE_CASE_, yet the calls reference `message`, `rotor_pos`,
    # `rotor_sel`, `pb` and `en`, and `rotora` is undefined (the rotor
    # constants were mangled too) — the original names must be restored for
    # this demo to run.  TODO confirm against upstream.
    SCREAMING_SNAKE_CASE_ = '''This is my Python script that emulates the Enigma machine from WWII.'''
    SCREAMING_SNAKE_CASE_ = (1, 1, 1)
    SCREAMING_SNAKE_CASE_ = '''pictures'''
    SCREAMING_SNAKE_CASE_ = (rotora, rotora, rotora)
    SCREAMING_SNAKE_CASE_ = enigma(message, rotor_pos, rotor_sel, pb)

    print('''Encrypted message:''', en)
    # Running the ciphertext back through with the same settings decrypts it.
    print('''Decrypted message:''', enigma(en, rotor_pos, rotor_sel, pb))
| 465 |
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
# Vision-only dependencies are imported lazily behind an availability check.
if is_vision_available():
    import textwrap

    from PIL import Image, ImageDraw, ImageFont

if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    # NOTE(review): this should set `is_torch_greater_or_equal_than_1_11 = False`
    # (the name the version check below reads); the constant name was mangled.
    SCREAMING_SNAKE_CASE_ = False

# NOTE(review): mangled constant names — presumably `logger` and
# `DEFAULT_FONT_PATH` (the hub repo that hosts Arial.TTF for render_text).
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = '''ybelkada/fonts'''
def _check_torch_version():
    """Raise ImportError if an installed torch is older than the required 1.11.

    NOTE(review): renamed from the scrambled `lowercase` to match the
    `_check_torch_version()` call sites below.
    """
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Extract non-overlapping patches from a (C, H, W) image tensor.

    Returns a tensor of shape (1, H/patch_height, W/patch_width,
    C * patch_height * patch_width) — one flattened patch per grid cell.

    NOTE(review): the scrambled original used one name for all three
    parameters (a SyntaxError); names restored from the body's references to
    `image_tensor`, `patch_height` and `patch_width`.
    """
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    # Add a batch dimension for unfold.
    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(
        image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width)
    )
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
def render_text(
    text,
    text_size=36,
    text_color="black",
    background_color="white",
    left_padding=5,
    right_padding=5,
    top_padding=5,
    bottom_padding=5,
    font_bytes=None,
    font_path=None,
):
    """Render `text` onto a new PIL image, wrapped at 80 characters.

    NOTE(review): the scrambled original used one name for every parameter
    (a SyntaxError); names reconstructed from the body and defaults.
    """
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    # Font source priority: raw bytes > explicit path > default hub download.
    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        # NOTE(review): DEFAULT_FONT_PATH is the module constant mangled to
        # SCREAMING_SNAKE_CASE_ above — restore its name for this to resolve.
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header, **kwargs):
    """Render `header` text above `image` and return the combined numpy array.

    Both parts are rescaled to a common width (the wider of the two).
    Extra keyword arguments are forwarded to `render_text`.

    NOTE(review): the scrambled original duplicated parameter names (a
    SyntaxError); restored to match the `render_header(image, header_text[i],
    ...)` call in `preprocess`.
    """
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    """Image processor for Pix2Struct.

    Converts images into a fixed-length sequence of flattened patches, each
    prefixed with its (row, col) coordinates, padded up to `max_patches`.

    NOTE(review): the scrambled original named this class `lowerCAmelCase_`
    with undefined base `A__` and gave every method duplicate parameter names
    (SyntaxErrors).  Identity restored from the "Pix2StructImageProcessor"
    error string in the version check and the module's `BaseImageProcessor`
    import — confirm against upstream.
    """

    # Name of the model input produced by `preprocess`.
    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb=True,
        do_normalize=True,
        patch_size=None,
        max_patches=2048,
        is_vqa=False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Default 16x16 patches when no size is supplied.
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        # VQA checkpoints render a text header on top of every image.
        self.is_vqa = is_vqa

    def extract_flattened_patches(self, image, max_patches, patch_size, **kwargs):
        """Resize `image` to an optimal patch grid and flatten the patches.

        Returns a (max_patches, 2 + patch_h * patch_w * channels) numpy array;
        the first two columns are 1-based row/column ids, trailing rows are
        zero padding.
        """
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t. the resized image fits in `max_patches` patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)
        return result

    def normalize(self, image, data_format=None, **kwargs):
        """Per-image standardization: (image - mean) / adjusted_stddev.

        The stddev is floored at 1/sqrt(num_pixels) to avoid division by ~0
        on uniform images.
        """
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)

    def preprocess(
        self,
        images,
        header_text=None,
        do_convert_rgb=None,
        do_normalize=None,
        max_patches=None,
        patch_size=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess a batch into flattened patches plus an attention mask.

        Raises:
            ValueError: On invalid images, an explicit `data_format` kwarg, or
                a missing header for VQA processors.
        """
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy — 1 for real patches, 0 for padding rows.
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
| 465 | 1 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
# NOTE(review): all four constants below were mangled to the single name
# lowerCamelCase__, so each assignment overwrites the previous; the last line
# references VECTOR_1..VECTOR_3 (undefined here) and the __main__ block uses
# INITIAL_VECTORS — those original names must be restored.
lowerCamelCase__ = numpy.array([0, 0])
lowerCamelCase__ = numpy.array([0.5, 0.8660254])
lowerCamelCase__ = numpy.array([1, 0])

lowerCamelCase__ = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch-snowflake iteration step `steps` times.

    NOTE(review): the scrambled original duplicated parameter names (a
    SyntaxError); the function name is restored to match the `iterate(...)`
    call in the __main__ block.

    Args:
        initial_vectors: Polyline vertices to start from.
        steps: Number of refinement iterations (0 returns the input list).

    Returns:
        The refined vertex list.
    """
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Perform one Koch refinement: each segment becomes four segments.

    The middle third of every segment is replaced by the two sides of an
    equilateral "bump" (the third-point rotated by 60 degrees).

    NOTE(review): mangled local names restored; behavior reconstructed from
    the visible statement order — confirm against upstream.

    Args:
        vectors: Polyline vertices (at least two).

    Returns:
        The refined vertex list (4 * (n - 1) + 1 vertices for n inputs).
    """
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counterclockwise by `angle_in_degrees`.

    NOTE(review): the scrambled original duplicated parameter names (a
    SyntaxError); name restored to match the `rotate(...)` call in
    `iteration_step`.

    Args:
        vector: 2D vector to rotate.
        angle_in_degrees: Rotation angle in degrees.

    Returns:
        The rotated vector.
    """
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
def plot(vectors: list[numpy.ndarray]) -> None:
    """Plot the polyline described by `vectors` with equal axis scaling.

    NOTE(review): mangled local names restored; the function name matches the
    `plot(...)` call in the __main__ block.
    """
    # Avoid stretched display of the snowflake.
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # NOTE(review): INITIAL_VECTORS and processed_vectors are the original
    # names of the constants/results mangled to lowerCamelCase__ above —
    # restore them for this demo to run.
    lowerCamelCase__ = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
| 82 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both module constants below were mangled to the single name
# lowerCamelCase__ (the second overwrites the first); upstream these are
# `logger` and `DPR_PRETRAINED_CONFIG_ARCHIVE_MAP` — TODO restore.
lowerCamelCase__ = logging.get_logger(__name__)
# Mapping from pretrained DPR checkpoint name to its hosted config URL.
lowerCamelCase__ = {
    '''facebook/dpr-ctx_encoder-single-nq-base''': (
        '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-question_encoder-single-nq-base''': (
        '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-reader-single-nq-base''': (
        '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-ctx_encoder-multiset-base''': (
        '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-question_encoder-multiset-base''': (
        '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-reader-multiset-base''': (
        '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
    ),
}
class DPRConfig(PretrainedConfig):
    """Configuration for DPR encoders/readers: BERT-style hyperparameters plus
    an optional projection dimension on top of the pooler.

    NOTE(review): the scrambled original named this `_UpperCAmelCase` with an
    undefined base `lowerCAmelCase`; `PretrainedConfig` is the config base
    imported by this module and the ``model_type = "dpr"`` attribute
    identifies it as DPRConfig — confirm against upstream.  The original
    ``__init__`` duplicated every parameter name (a SyntaxError); names are
    reconstructed from the attribute assignments in the body.
    """

    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # 0 disables the extra projection layer.
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 82 | 1 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict):
    """Split a scikit-learn dataset Bunch/dict into a (features, targets) pair.

    NOTE(review): renamed from the scrambled `UpperCamelCase_` to match the
    `data_handling(...)` call in `main`.

    Args:
        data: Mapping with ``"data"`` and ``"target"`` keys.

    Returns:
        Tuple ``(data["data"], data["target"])``.
    """
    return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    """Fit an XGBoost regressor and predict targets for the test features.

    NOTE(review): the scrambled original duplicated parameter names (a
    SyntaxError); renamed to match the `xgboost(x_train, y_train, x_test)`
    call in `main`.

    Args:
        features: Training feature matrix.
        target: Training targets.
        test_features: Feature matrix to predict on.

    Returns:
        Predictions reshaped to a column vector of shape (n_samples, 1).
    """
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions
def main() -> None:
    """Fetch California housing data, train the XGBoost regressor, print errors.

    NOTE(review): the scrambled original was named `UpperCamelCase_` and
    referenced undefined names (``A__``, collapsed unpacking targets); the
    name is restored to match the ``main()`` call in the __main__ guard and
    the locals are reconstructed from the evident train/test flow.
    """
    # Load California house price dataset
    housing = fetch_california_housing()
    data, target = data_handling(housing)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    # NOTE(review): calls `main()`, but the function above is defined under
    # the mangled name `UpperCamelCase_` — restore the definition name.
    main()
| 275 |
'''simple docstring'''

# Lazy-import scaffolding for the Audio Spectrogram Transformer (AST) model.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available

# NOTE(review): every constant below was mangled to the single name `__A`, so
# each assignment overwrites the previous one.  The `_LazyModule(...)` call at
# the bottom references `_import_structure` (undefined here) — upstream this
# dict and its conditional extensions all build `_import_structure`.  Also,
# the `__A : List[str]` style annotations evaluate `List`/`Union`/`Dict` at
# module level without importing them from `typing` — a NameError at import.
__A : str = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

# Torch-only symbols are registered only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : List[str] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

# Speech-processing symbols require the speech extras.
try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : Union[str, Any] = ["ASTFeatureExtractor"]

if TYPE_CHECKING:
    # Static type checkers see the real imports...
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    # ...while at runtime the module is replaced by a lazy proxy.
    import sys

    __A : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 275 | 1 |
from collections import defaultdict
from math import gcd
def solution(limit: int = 1500000) -> int:
    """Project Euler 75: count perimeters <= `limit` that admit exactly one
    integer right triangle.

    Uses Euclid's formula: for coprime m > n of opposite parity, the primitive
    triple has perimeter 2*m*(m+n); every multiple of it up to `limit` is
    tallied, then perimeters with frequency 1 are counted.

    NOTE(review): renamed from the scrambled `__lowerCAmelCase` to match the
    `solution()` call in the __main__ guard; the original seeded
    ``defaultdict`` with an undefined name instead of ``int``.

    Args:
        limit: Maximum perimeter to consider.

    Returns:
        Number of perimeters with exactly one solution.
    """
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    # Smallest perimeter for this m is 2*m*(m+1) (with n=1).
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        # n of opposite parity to m, n < m.
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
    # NOTE(review): calls `solution()` (default limit 1,500,000 — takes a few
    # seconds), but the function above is defined under the mangled name
    # `__lowerCAmelCase` — restore the definition name.
    print(F'{solution() = }')
def gnome_sort(lst: list) -> list:
    """Sort `lst` in place with gnome sort and return it.

    Walks forward while adjacent elements are ordered; on an inversion, swaps
    and steps back.  O(n^2) worst case, O(n) on already-sorted input.

    NOTE(review): renamed from the scrambled `__lowerCAmelCase` to match the
    `gnome_sort(...)` call in the __main__ guard.  The original "swap"
    assigned both values to one collapsed name, destroying the swap.

    Args:
        lst: List of mutually comparable items.

    Returns:
        The same list, sorted ascending.
    """
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            # Swap the inverted pair and step back to re-check.
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
    # NOTE(review): the variable names were mangled to `_a`; the next lines
    # reference `user_input` and `unsorted`, which must be restored for this
    # script to run.  The `List[Any]`/`Optional[int]` annotations are also
    # evaluated at module level without a `typing` import (NameError).
    _a: List[Any] = input("""Enter numbers separated by a comma:\n""").strip()
    _a: Optional[int] = [int(item) for item in user_input.split(""",""")]
    print(gnome_sort(unsorted))
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
# NOTE(review): the constants below were all mangled to `__lowerCamelCase`,
# so each assignment overwrites the previous; the spec call references
# `DIFFUSERS_PATH` and the loader line references `spec`, which must be the
# original names (upstream: DIFFUSERS_PATH, REPO_PATH, spec, diffusers_module).
__lowerCamelCase : List[Any] = "src/diffusers"
__lowerCamelCase : Dict = "."

# This is to make sure the diffusers module imported is the one in the repo.
__lowerCamelCase : Dict = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
__lowerCamelCase : Union[str, Any] = spec.loader.load_module()
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
return line.startswith(lowerCAmelCase_ ) or len(lowerCAmelCase_ ) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$" , lowerCAmelCase_ ) is not None
def find_code_in_diffusers(object_name):
    """Return the source code of `object_name` from the diffusers source tree.

    `object_name` is a dotted path like ``models.unet_2d.UNet2DModel``: the
    leading parts locate the module file under ``DIFFUSERS_PATH``, the
    trailing parts are nested class/function names.

    NOTE(review): renamed from the scrambled `UpperCAmelCase_`; locals
    reconstructed from the body.  `DIFFUSERS_PATH` is the module constant
    mangled to `__lowerCamelCase` above — restore its name for this to
    resolve.  The per-level ``indent += "    "`` (four spaces, matching
    Python nesting) was collapsed to a single space by the scrambling.

    Raises:
        ValueError: If no module file or matching class/function is found.
    """
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        # Each nesting level adds one indentation step.
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
# NOTE(review): these three compiled patterns were all mangled to the name
# `__lowerCamelCase`, overwriting each other; the code below references them
# as `_re_copy_warning`, `_re_replace_pattern` and (upstream)
# `_re_fill_pattern` — restore those names.
# Matches "# Copied from diffusers.<path>" markers with optional suffix.
__lowerCamelCase : Any = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
# Matches "old->new" replacement directives in the Copied-from suffix.
__lowerCamelCase : int = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
# Matches "<FILL ...>" placeholders.
__lowerCamelCase : Union[str, Any] = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    """Return the leading whitespace of the first non-empty line of `code`.

    NOTE(review): renamed from the scrambled `UpperCAmelCase_` to match the
    `get_indent(...)` call sites below.

    Args:
        code: Source text (possibly multi-line).

    Returns:
        The indentation string, or ``""`` if `code` has no non-empty line.
    """
    lines = code.split("\n")
    idx = 0
    # Skip leading empty lines.
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Reformat `code` with black (line length 119), preserving indentation.

    Indented code is temporarily wrapped in a dummy ``class Bla:`` so black
    accepts it, then unwrapped.

    NOTE(review): renamed to the upstream name `blackify` (the scrambled
    original reused `UpperCAmelCase_`, shadowing the sibling helpers).  The
    black target version and the second return value of
    `style_docstrings_in_code` were also mangled — reconstructed from
    upstream; confirm.
    """
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """Check that all `# Copied from` sections in *filename* match their source.

    Returns a list of `[object_name, start_index]` pairs for inconsistent
    copies. When *overwrite* is True the file is rewritten in place with the
    theoretical (correct) code.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop because `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)
        # The copy starts right after the comment unless the indents differ
        # (then the definition line itself is on the next line).
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or
        # when we see an `# End copy` comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)
        # Remove any nested `Copied from` comments to avoid circular copies.
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)
        # Before comparing, apply the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    # Also replace lower/upper-cased variants of the names.
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)
            # Blackify after replacement. To be able to do that, we need the
            # header (class or function definition) from the previous line.
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite=False):
    """Run `is_copy_consistent` on every python file of the diffusers source.

    Raises an Exception listing all inconsistencies when *overwrite* is False;
    otherwise files are fixed in place.
    """
    # NOTE(review): DIFFUSERS_PATH is defined earlier in this file — verify it
    # points at the package source root.
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
    # CLI entry point: check (or fix) all `# Copied from` sections.
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_copies(args.fix_and_overwrite)
| 310 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger (HuggingFace logging utility).
logger = logging.get_logger(__name__)

# Map of canonical SEW-D checkpoints to their hosted config files.
SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    """Configuration for a SEW-D speech model.

    Defaults correspond to the `asapp/sew-d-tiny-100k` architecture. The
    original source used duplicate placeholder parameter names (a syntax
    error); the canonical parameter list is restored here.
    """

    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        # Transformer encoder dimensions.
        self.hidden_size = hidden_size
        # Convolutional feature-extractor configuration.
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        # Disentangled (DeBERTa-style) attention settings.
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        # Dropout / regularization.
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # Fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # CTC loss.
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # Sequence classification head.
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        # Total downsampling factor of the convolutional feature extractor.
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 310 | 1 |
def _A ( _UpperCamelCase = 1_000_000 ):
_UpperCAmelCase : List[Any] = 1
_UpperCAmelCase : List[Any] = 1
_UpperCAmelCase : List[Any] = {1: 1}
for inputa in range(2 , _A ):
_UpperCAmelCase : Dict = 0
_UpperCAmelCase : Optional[Any] = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
_UpperCAmelCase : int = (3 * number) + 1
counter += 1
if inputa not in counters:
_UpperCAmelCase : List[str] = counter
if counter > pre_counter:
_UpperCAmelCase : int = inputa
_UpperCAmelCase : Tuple = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 709 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
# Module-level logger (HuggingFace logging utility).
UpperCAmelCase__ : Dict = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    """Deprecated alias of `MobileViTImageProcessor`.

    Kept only for backward compatibility; emits a FutureWarning on
    instantiation and otherwise behaves exactly like the image processor.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 416 | 0 |
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def parse_args():
    """Parse the CLI arguments of the image-generation script."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c",
        "--caption",
        type=str,
        default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n",
        "--images_num",
        type=int,
        default=4,
        help="How much images to generate.",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Seed for random process.",
    )
    parser.add_argument(
        "-ci",
        "--cuda_id",
        type=int,
        default=0,
        help="cuda_id.",
    )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    """Paste `rows * cols` PIL images into a single grid image.

    Raises ValueError when the number of images does not match the grid size.
    """
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")
    # All images are assumed to share the size of the first one.
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        # Fill the grid row by row.
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    """Run the diffusion pipeline and return (grid_image, list_of_images)."""
    # Seed on the pipeline's device so results are reproducible.
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    # Arrange images in an (approximately) square grid.
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()

# Load models and create wrapper for stable diffusion.
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Disable the safety checker (pass images through unchanged).
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    # Load an INT8 UNet quantized with Intel Neural Compressor, if present.
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
# Save the grid plus each individual image under a caption-derived directory.
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 595 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
# Fixture payload shared by the file-utils tests below.
FILE_CONTENT = """\
Text data.
Second line of data."""
# Base filename used for temporary fixture files.
FILE_PATH = "file"
@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    """Session-scoped fixture: a zstd-compressed file containing FILE_CONTENT."""
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture
def tmpfs_file(tmpfs):
    """Fixture: FILE_CONTENT written into the mock `tmp://` filesystem."""
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    """cached_path should extract compressed files and match the raw text."""
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    """The extraction directory should honor both config overrides and custom cache dirs."""
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    """Local files are returned as-is by cached_path (absolute and relative)."""
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file
def test_cached_path_missing_local(tmp_path):
    """cached_path raises FileNotFoundError for missing local files."""
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    """get_from_cache should resolve fsspec URLs such as tmp://."""
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    """Remote cached_path calls must fail fast in offline mode."""
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    """http_get / http_head must fail fast in offline mode."""
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    """ftp_get / ftp_head must fail fast in offline mode."""
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    """fsspec_get / fsspec_head must fail fast in offline mode."""
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
| 202 | 0 |
def print_max_activities(start, finish) -> None:
    """Greedy activity selection: print a maximal set of non-overlapping activities.

    Assumes the activities are already sorted by finish time; indices of the
    selected activities are printed comma-separated.
    """
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected.
    i = 0
    print(i, end=",")
    # Consider the rest of the activities.
    for j in range(n):
        # Select activity j if it starts at or after the finish time of the
        # previously selected activity.
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Demo input: activities sorted by finish time.
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
| 677 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure for the CLIP sub-package: maps submodule name to the
# public names it exports. Optional backends are appended only when available.
_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Direct imports for static type checkers only.
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 677 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
# docstyle-ignore
# Template wrapped around a task for chat-style prompting.
CHAT_MESSAGE_PROMPT = """
Human: <<task>>

Assistant: """


# Default dataset repo hosting the prompt templates.
DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
# File names of the prompt templates, keyed by mode.
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """Return a prompt: either the literal string passed in, or the template
    downloaded from a HuggingFace dataset repo.

    A value containing whitespace is treated as the prompt itself; otherwise
    it is interpreted as a repo ID (None falls back to the default repo).
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
| 103 |
'''simple docstring'''
def catalan_numbers(upper_limit: int) -> list:
    """Return the Catalan numbers C(0)..C(upper_limit) via dynamic programming.

    Raises ValueError when upper_limit is negative.
    """
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j) * C(i-j-1)), for j = 0..i-1
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
if __name__ == "__main__":
    # Interactive driver: repeatedly prompt for an upper limit and print the
    # Catalan sequence up to it; any non-integer input (or EOF) exits.
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    """Delete fairseq bookkeeping keys from *state_dict* in place (if present)."""
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        # `None` default: silently skip keys that are absent.
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build a bias-free Linear layer sharing the embedding's weight tensor."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Direct `.data` assignment ties the linear weights to the embedding table.
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    """Convert a fairseq XGLM checkpoint into an `XGLMForCausalLM` model."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    # fairseq names the stack "decoder"; HF names it "model".
    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    # Tie the LM head to the input embeddings.
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model
if __name__ == "__main__":
    # CLI: convert a fairseq checkpoint and save the HF model.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 140 |
# First cell injected into auto-generated doc notebooks.
INSTALL_CONTENT = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
# Doc placeholders replaced by fake classes so black can format the examples.
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 140 | 1 |
import math
def main() -> None:
    """Interactive driver for the transposition cipher (encrypt or decrypt)."""
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")
def encrypt_message(key: int, message: str) -> str:
    """Encrypt *message* with a columnar transposition cipher of width *key*.

    Character i of the message goes to column i % key; the cipher text is the
    concatenation of the columns.
    """
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        # Collect every key-th character starting at this column.
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)
def a ( A__ : int , A__ : str ) -> str:
"""simple docstring"""
_lowercase =math.ceil(len(A__ ) / key )
_lowercase =key
_lowercase =(num_cols * num_rows) - len(A__ )
_lowercase =[''] * num_cols
_lowercase =0
_lowercase =0
for symbol in message:
plain_text[col] += symbol
col += 1
if (
(col == num_cols)
or (col == num_cols - 1)
and (row >= num_rows - num_shaded_boxes)
):
_lowercase =0
row += 1
return "".join(A__ )
if __name__ == "__main__":
    # Run doctests first, then start the interactive cipher prompt.
    import doctest
    doctest.testmod()
    main()
| 291 |
def perfect(number: int) -> bool:
    """Return True when *number* equals the sum of its proper divisors.

    Non-positive numbers are never perfect (guards the degenerate 0 == 0 case).
    """
    if number < 1:
        return False
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
    # Interactive check of a single number.
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
| 291 | 1 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    """Helper that holds image-processor settings and computes expected output
    sizes for the ConditionalDETR image-processing tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # `None` defaults avoid shared mutable default arguments; the effective
        # defaults are unchanged.
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor is expected to produce.

        For a single image the shorter side is resized to `size["shortest_edge"]`
        keeping aspect ratio; for a batch, each image is measured and the
        per-dimension maxima are returned (padding behavior).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                # Assumes channel-first arrays/tensors: (C, H, W).
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for ConditionalDetrImageProcessor.

    Method names restored to ``test_*`` so unittest discovery finds them
    (the mangled original collapsed every method onto one name).
    """

    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(
            self.image_processor_tester, equal_resolution=False, numpify=True
        )
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(
            self.image_processor_tester, equal_resolution=False, torchify=True
        )
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained(
            "microsoft/conditional-detr-resnet-50"
        )
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(
            images=image, annotations=target, masks_path=masks_path, return_tensors="pt"
        )

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 714 |
'''simple docstring'''
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of *n* (all divisors except *n* itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            # i and n // i form a divisor pair; count both.
            total += i + n // i
        elif i == sqrt(n):
            # Perfect square: count the root only once.
            total += i
    # The loop counted n itself (paired with divisor 1); remove it.
    return total - n


def solution(limit: int = 10000) -> int:
    """Return the sum of all amicable numbers below *limit* (Project Euler problem 21).

    A number i is amicable when sum_of_divisors(sum_of_divisors(i)) == i and
    sum_of_divisors(i) != i (the second clause excludes perfect numbers).
    """
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 35 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
class _snake_case(SegformerImageProcessor):
    """Deprecated feature-extractor alias that forwards everything to SegformerImageProcessor."""

    def __init__(self, *args, **kwargs) -> None:
        # Warn on every instantiation, then behave exactly like the parent image processor.
        # (The original had `*a__, **a__` — a duplicate-argument SyntaxError — and passed a
        # bogus value as the warning category.)
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 400 |
'''simple docstring'''
import re
from filelock import FileLock
try:
    import nltk

    # Bind the real flag name: the rest of the module reads NLTK_AVAILABLE
    # (the mangled original assigned an unrelated name, leaving it undefined).
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    # Download the punkt sentence tokenizer once, under a file lock so
    # concurrent workers do not race on the download.
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
def UpperCamelCase_( snake_case : str ):
    """Split *snake_case* into sentences, one per line, after removing pegasus "<n>" markers."""
    # re.sub returns a new string; the original discarded the result, so the
    # markers were never actually removed.
    snake_case = re.sub("<n>", "", snake_case)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(snake_case))
| 400 | 1 |
def __lowercase(index) -> dict:
    """Return one of four canned adjacency-list graphs, selected by *index* (0-3).

    The parameter was mangled to `_A` while the body indexed with `index`,
    which made every call raise NameError.
    """
    return [
        {
            0: [1, 2],
            1: [0, 2],
            2: [0, 1, 3, 5],
            3: [2, 4],
            4: [3],
            5: [2, 6, 8],
            6: [5, 7],
            7: [6, 8],
            8: [5, 7],
        },
        {
            0: [6],
            1: [9],
            2: [4, 5],
            3: [4],
            4: [2, 3],
            5: [2],
            6: [0, 7],
            7: [6],
            8: [],
            9: [1],
        },
        {
            0: [4],
            1: [6],
            2: [],
            3: [5, 6, 7],
            4: [0, 6],
            5: [3, 8, 9],
            6: [1, 3, 4, 7],
            7: [3, 6, 8, 9],
            8: [5, 7],
            9: [5, 7],
        },
        {
            0: [1, 3],
            1: [0, 2, 4],
            2: [1, 3, 4],
            3: [0, 2, 4],
            4: [1, 2, 3],
        },
    ][index]
def __lowercase(graph) -> list[tuple[int, int]]:
    """Find all bridges of an undirected graph given as {vertex: [neighbours]}.

    Uses a DFS low-link computation; an edge (at, to) is a bridge when no back
    edge from to's subtree reaches at or above. Local names restored from their
    use sites — the mangled original declared `def dfs(_A, _A, _A, _A)`, a
    duplicate-argument SyntaxError.
    """
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    # No back edge from to's subtree reaches above `at`: bridge.
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges = []
    # Cover disconnected graphs by starting a DFS from every unvisited vertex.
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 446 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and pretrained-config archive map.
# NOTE(review): both bindings below use the same mangled name `UpperCAmelCase__`,
# so the dict shadows the logger — upstream presumably used `logger` and
# `GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP`; confirm before relying on either.
UpperCAmelCase__ : Dict = logging.get_logger(__name__)
UpperCAmelCase__ : int = {
    """abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class a__(PretrainedConfig):
    """Configuration class for GPT-NeoX-Japanese models.

    Restored from the mangled original: every parameter was named
    `UpperCAmelCase__` (a duplicate-argument SyntaxError) and every value was
    bound to a local instead of a `self` attribute, so the config stored nothing.
    """

    # Registry key consumed by the PretrainedConfig machinery.
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10_000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31_996,
        eos_token_id=31_999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Store every hyper-parameter on self so PretrainedConfig can serialize it.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
| 446 | 1 |
'''simple docstring'''
from collections import deque
from .hash_table import HashTable
class a__(HashTable):
    """Hash table whose buckets are deques, so colliding keys chain their values."""

    def __init__(self, *args, **kwargs):
        # Pure pass-through; the original `*__lowerCAmelCase, **__lowerCAmelCase`
        # was a duplicate-argument SyntaxError.
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        """Prepend *data* to the deque at *key*, creating the bucket on first use."""
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        # Keep the parent's key bookkeeping in sync with the bucket contents.
        # NOTE(review): `_keys` is inherited from HashTable — confirm the attribute name.
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        """Average remaining capacity per bucket, scaled by the charge factor."""
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        """Reuse *key* while its bucket still has room; otherwise defer to the parent."""
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
| 370 | '''simple docstring'''
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first *length* hexagonal numbers, h(n) = n * (2n - 1).

    Raises:
        ValueError: if *length* is not a positive integer.
    """
    # Check the type first: with the original order, a non-int argument hit
    # `length <= 0` and raised TypeError instead of the documented ValueError
    # (and the mangled `isinstance(_A, _A)` raised TypeError for every input).
    if not isinstance(length, int) or length <= 0:
        raise ValueError("""Length must be a positive integer.""")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=1_0))
| 370 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and pretrained-config archive map.
# NOTE(review): both bindings use the same mangled name `a__`, so the dict
# shadows the logger; upstream presumably used distinct names — confirm.
a__: str = logging.get_logger(__name__)
a__: str = {
    'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json',
    'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE__(PretrainedConfig):
    """Configuration class for MarkupLM models.

    Restored from the mangled original: every parameter was named
    `__lowerCamelCase` (duplicate-argument SyntaxError) and values were bound
    to locals instead of `self` attributes.
    """

    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 710 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
    from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
    )
    # Fall back to the slow tokenizer; `write_tokenizer` checks this name.
    # (The mangled original bound None to an unrelated top-level name, so
    # `LlamaTokenizerFast` stayed undefined on the import-failure path.)
    LlamaTokenizerFast = None
# Default FFN intermediate sizes per LLaMA model size.
INTERMEDIATE_SIZE_MAP = {
    "7B": 11_008,
    "13B": 13_824,
    "30B": 17_920,
    "65B": 22_016,
    "70B": 28_672,
}
# Number of checkpoint shards per model size; write_model reads this by name
# (the mangled original bound both dicts to the same throwaway identifier).
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    """Round LLaMA's 8n/3 FFN width up to the nearest multiple of *multiple_of*.

    Name and parameters restored from the body and the call site in write_model;
    the mangled signature repeated one parameter name (a SyntaxError).
    """
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
def read_json(path):
    """Load and return the JSON document stored at *path*."""
    with open(path, "r") as f:
        return json.load(f)
def write_json(text, path):
    """Serialize *text* as JSON into the file at *path*."""
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    """Convert a Meta LLaMA checkpoint under *input_base_path* into HF format at *model_path*.

    Local names restored from their use sites: the mangled original bound every
    intermediate to the same throwaway identifier, so nothing resolved.
    """
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim))
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim)
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)

            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1)
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0)
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1)
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0)

        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))

    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }

    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    """Save the sentencepiece model at *input_tokenizer_path* as an HF tokenizer at *tokenizer_path*."""
    # Initialize the tokenizer based on the `spm` model; prefer the fast
    # tokenizer when its import succeeded at module load time.
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    """CLI entry point: convert LLaMA weights (and tokenizer) to the HF format."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--input_dir', help='Location of LLaMA weights, which contains tokenizer.model and model folders', )
    parser.add_argument(
        '--model_size', choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'], )
    parser.add_argument(
        '--output_dir', help='Location to write HF model and tokenizer', )
    parser.add_argument('--safe_serialization', type=bool, help='Whether or not to save using `safetensors`.')
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, 'tokenizer.model')
    write_tokenizer(args.output_dir, spm_path)


if __name__ == "__main__":
    main()
| 212 | 0 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class _a(PretrainedConfig):
    """Configuration class for SegFormer models.

    Restored from the mangled original: all parameters shared one name
    (a duplicate-argument SyntaxError) and every value was bound to a local
    instead of a `self` attribute.
    """

    model_type = '''segformer'''

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
                ' removed, as the behaviour will default to that of reshape_last_stage = True.',
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        # Deprecated flag: default True unless explicitly passed through kwargs.
        self.reshape_last_stage = kwargs.get('reshape_last_stage', True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class _a(OnnxConfig):
    """ONNX export configuration for SegFormer.

    The three shadowed mangled methods are restored to the property names the
    OnnxConfig base class consumes.
    """

    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs(self):
        # Single image input with dynamic batch/channel/spatial axes.
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])

    @property
    def atol_for_validation(self):
        # Absolute tolerance when validating ONNX outputs against PyTorch.
        return 1e-4

    @property
    def default_onnx_opset(self):
        return 12
| 28 |
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class _a(Pipeline):
    """Feature-extraction pipeline: tokenize, run the model, return hidden states as lists.

    Hook names restored to the Pipeline contract (`_sanitize_parameters`,
    `preprocess`, `_forward`, `postprocess`); the mangled original collapsed
    them onto one name and had duplicate-argument SyntaxErrors.
    """

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)')
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs):
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
| 28 | 1 |
def apply_table(inp, table):
    """Permute the string *inp* according to the 1-indexed positions in *table*.

    Name restored from the call sites below; the mangled signature repeated
    one parameter name (a SyntaxError).
    """
    res = """"""
    for i in table:
        res += inp[i - 1]
    return res
def lowerCamelCase__(data):
    """Circular left shift of the bit string `data` by one position.

    Fixes the mangled signature: the parameter was named `lowercase` while the
    body referenced `data`.
    """
    return data[1:] + data[0]
def lowerCamelCase__(a, b):
    """Bitwise XOR of two equal-length bit strings, returned as a bit string.

    Fixes the mangled signature: both parameters were named `lowercase`
    (a SyntaxError) and the loop bound referenced an undefined name.
    """
    return "".join("0" if x == y else "1" for x, y in zip(a, b))
def lowerCamelCase__(s, data):
    """Apply an S-DES S-box `s` to a 4-bit string `data`.

    Row is selected by the outer bits (first and last), column by the middle
    two bits; returns the looked-up value as an (unpadded) binary string.
    Fixes the mangled signature: both parameters were named `lowercase`.
    """
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]
def lowerCamelCase__(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES (S-DES).

    Expands and keys the right half, runs it through both S-boxes, permutes
    with P4, and XORs into the left half; returns new_left + right.

    NOTE(review): relies on the module-level helpers `apply_table`, `xor`,
    `apply_sbox` and the global `p4_table`; those names are mangled elsewhere
    in this file and must be restored for the round to run. The five
    parameters had all been mangled to `lowercase` (a SyntaxError).
    """
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    # S-box outputs are unpadded binary strings; left-pad each to 2 bits.
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
    # NOTE(review): every global in this driver had been mangled to the single
    # name `snake_case`, so each assignment clobbered the previous one; the
    # original S-DES variable names are restored below. The helper functions
    # (`apply_table`, `left_shift`, `function`) are likewise mangled elsewhere
    # in this file and must be restored for the driver to run.
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    # Permutation tables for the S-DES key schedule and round function.
    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # Key generation: P10, split, one shift, P8 -> key1; two more shifts -> key2.
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # Encryption: IP, round(key1), swap halves, round(key2), inverse IP.
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # Decryption applies the two round keys in reverse order.
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decypting is:", PT)
| 720 |
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
    """Dummy placeholder used when the `keras_nlp` backend is not installed.

    Instantiating it raises an informative ImportError via `requires_backends`.
    The metaclass had been mangled to the undefined name `lowerCAmelCase`; it
    is restored to `DummyObject`, which is imported at the top of this block's
    file chunk.
    """

    # Backends this dummy stands in for.
    UpperCamelCase_ : Union[str, Any] = ['''keras_nlp''']

    def __init__( self , *args , **kwargs ):
        # Raises immediately: this class exists only to produce a helpful error.
        requires_backends(self , ["keras_nlp"] )
| 488 | 0 |
'''Lazy-import shim for the Wav2Vec2-with-LM processing module.'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
# NOTE(review): this dict is assigned to a mangled name (`UpperCamelCase_`),
# but `_LazyModule` below reads `_import_structure` — in the original module
# this variable is named `_import_structure`; confirm and restore.
UpperCamelCase_ : str = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]}
if TYPE_CHECKING:
    # Static type checkers see the real import.
    from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
    import sys
    # At runtime the module object is replaced by a lazy proxy that imports
    # submodules on first attribute access.
    UpperCamelCase_ : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 331 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase__ ( __lowerCamelCase , unittest.TestCase ):
    """Tokenizer test suite for the (fast-only) Bloom tokenizer.

    NOTE(review): every test/setup method below was mangled to the same name
    (`lowerCAmelCase_`), so only the last definition is actually collected;
    upstream these are `setUp`, `get_rust_tokenizer` and distinct `test_*`
    methods. The first base class (`__lowerCamelCase`) is also mangled —
    upstream it is `TokenizerTesterMixin`. Several locals (`a__`) and the
    `tokenizer`/`kwargs` references are mangled as well; hedged comments mark
    the affected lines.
    """

    # Bloom ships no slow tokenizer, hence the first class is None.
    UpperCamelCase__ = None
    UpperCamelCase__ = BloomTokenizerFast
    UpperCamelCase__ = BloomTokenizerFast
    UpperCamelCase__ = True
    UpperCamelCase__ = False
    UpperCamelCase__ = '''tokenizer_file'''
    UpperCamelCase__ = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''}

    def lowerCAmelCase_ ( self : Dict ):
        # Download the reference tokenizer once and save it under tmpdirname.
        # NOTE(review): `tokenizer` below is a mangled reference to the `a__` local.
        super().setUp()
        a__ = BloomTokenizerFast.from_pretrained("bigscience/tokenizer" )
        tokenizer.save_pretrained(self.tmpdirname )

    def lowerCAmelCase_ ( self : Dict ,**a__ : List[str] ):
        # NOTE(review): `kwargs` below is a mangled reference to the `a__` kwargs.
        kwargs.update(self.special_tokens_map )
        return BloomTokenizerFast.from_pretrained(self.tmpdirname ,**a__ )

    def lowerCAmelCase_ ( self : Tuple ):
        # Round-trip encode/decode of two known sentences against expected ids.
        a__ = self.get_rust_tokenizer()
        a__ = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        a__ = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
        a__ = tokenizer.batch_encode_plus(a__ )["input_ids"]
        self.assertListEqual(a__ ,a__ )
        a__ = tokenizer.batch_decode(a__ )
        self.assertListEqual(a__ ,a__ )

    def lowerCAmelCase_ ( self : Tuple ,a__ : List[str]=6 ):
        # Padding behaviour: works without max_length, raises with padding="max_length"
        # once pad_token has been hot-fixed to None.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                a__ = self.rust_tokenizer_class.from_pretrained(a__ ,**a__ )
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                a__ = "This is a simple input"
                a__ = ["This is a simple input 1", "This is a simple input 2"]
                a__ = ("This is a simple input", "This is a pair")
                a__ = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(a__ ,max_length=a__ )
                    tokenizer_r.encode_plus(a__ ,max_length=a__ )
                    tokenizer_r.batch_encode_plus(a__ ,max_length=a__ )
                    tokenizer_r.encode(a__ ,max_length=a__ )
                    tokenizer_r.batch_encode_plus(a__ ,max_length=a__ )
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding" )
                a__ = None # Hotfixing padding = None
                self.assertRaises(a__ ,tokenizer_r.encode ,a__ ,max_length=a__ ,padding="max_length" )
                # Simple input
                self.assertRaises(a__ ,tokenizer_r.encode_plus ,a__ ,max_length=a__ ,padding="max_length" )
                # Simple input
                self.assertRaises(
                    a__ ,tokenizer_r.batch_encode_plus ,a__ ,max_length=a__ ,padding="max_length" ,)
                # Pair input
                self.assertRaises(a__ ,tokenizer_r.encode ,a__ ,max_length=a__ ,padding="max_length" )
                # Pair input
                self.assertRaises(a__ ,tokenizer_r.encode_plus ,a__ ,max_length=a__ ,padding="max_length" )
                # Pair input
                self.assertRaises(
                    a__ ,tokenizer_r.batch_encode_plus ,a__ ,max_length=a__ ,padding="max_length" ,)

    def lowerCAmelCase_ ( self : Any ):
        # Encode/decode round-trip over one multilingual XNLI sample.
        a__ = self.get_rust_tokenizer()
        a__ = load_dataset("xnli" ,"all_languages" ,split="test" ,streaming=a__ )
        a__ = next(iter(a__ ) )["premise"] # pick up one data
        a__ = list(sample_data.values() )
        a__ = list(map(tokenizer.encode ,a__ ) )
        a__ = [tokenizer.decode(a__ ,clean_up_tokenization_spaces=a__ ) for x in output_tokens]
        self.assertListEqual(a__ ,a__ )

    def lowerCAmelCase_ ( self : Union[str, Any] ):
        # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positoonal embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) ,1 )
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) ,1 )
| 331 | 1 |
import math
import unittest
def UpperCamelCase_(number):
    """Return True if `number` is prime, using 6k±1 trial division.

    Fixes the mangled original: the parameter name did not match the `number`
    used in the body, and the precondition check read
    `isinstance(x, x)` instead of `isinstance(number, int)`.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class __A ( unittest.TestCase ):
    """Unit tests for the 6k±1 primality check defined above.

    NOTE(review): both test methods were mangled to the same name
    (`SCREAMING_SNAKE_CASE__`), so only the second is actually collected;
    upstream they are distinct `test_*` methods. `is_prime` and `_snake_case`
    are also mangled references (`_snake_case` was presumably the expected
    exception type) — confirm against the original module.
    """

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Known small primes must be accepted.
        self.assertTrue(is_prime(2 ) )
        self.assertTrue(is_prime(3 ) )
        self.assertTrue(is_prime(5 ) )
        self.assertTrue(is_prime(7 ) )
        self.assertTrue(is_prime(11 ) )
        self.assertTrue(is_prime(13 ) )
        self.assertTrue(is_prime(17 ) )
        self.assertTrue(is_prime(19 ) )
        self.assertTrue(is_prime(23 ) )
        self.assertTrue(is_prime(29 ) )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Negative input violates the precondition and must raise.
        with self.assertRaises(_snake_case ):
            is_prime(-19 )
        self.assertFalse(
            is_prime(0 ) , "Zero doesn't have any positive factors, primes must have exactly two." , )
        self.assertFalse(
            is_prime(1 ) , "One only has 1 positive factor, primes must have exactly two." , )
        # Composite numbers must be rejected.
        self.assertFalse(is_prime(2 * 2 ) )
        self.assertFalse(is_prime(2 * 3 ) )
        self.assertFalse(is_prime(3 * 3 ) )
        self.assertFalse(is_prime(3 * 5 ) )
        self.assertFalse(is_prime(3 * 5 * 7 ) )
# Run the primality test suite when executed as a script.
if __name__ == "__main__":
    unittest.main()
| 587 | import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
# Non-default values for every common `PretrainedConfig` kwarg. The test below
# uses this map to verify `PretrainedConfig.__init__` consumes all of them and
# that each value here differs from the config's default.
# NOTE(review): assigned to the mangled name `snake_case`; the tests reference
# it as `config_common_kwargs` — confirm and restore.
snake_case = {
    "return_dict": False,
    "output_hidden_states": True,
    "output_attentions": True,
    "torchscript": True,
    "torch_dtype": "float16",
    "use_bfloat16": True,
    "tf_legacy_loss": True,
    "pruned_heads": {"a": 1},
    "tie_word_embeddings": False,
    "is_decoder": True,
    "cross_attention_hidden_size": 128,
    "add_cross_attention": True,
    "tie_encoder_decoder": True,
    "max_length": 50,
    "min_length": 3,
    "do_sample": True,
    "early_stopping": True,
    "num_beams": 3,
    "num_beam_groups": 3,
    "diversity_penalty": 0.5,
    "temperature": 2.0,
    "top_k": 10,
    "top_p": 0.7,
    "typical_p": 0.2,
    "repetition_penalty": 0.8,
    "length_penalty": 0.8,
    "no_repeat_ngram_size": 5,
    "encoder_no_repeat_ngram_size": 5,
    "bad_words_ids": [1, 2, 3],
    "num_return_sequences": 3,
    "chunk_size_feed_forward": 5,
    "output_scores": True,
    "return_dict_in_generate": True,
    "forced_bos_token_id": 2,
    "forced_eos_token_id": 3,
    "remove_invalid_values": True,
    "architectures": ["BertModel"],
    "finetuning_task": "translation",
    "id2label": {0: "label"},
    "label2id": {"label": "0"},
    "tokenizer_class": "BertTokenizerFast",
    "prefix": "prefix",
    "bos_token_id": 6,
    "pad_token_id": 7,
    "eos_token_id": 8,
    "sep_token_id": 9,
    "decoder_start_token_id": 10,
    "exponential_decay_length_penalty": (5, 1.01),
    "suppress_tokens": [0, 1],
    "begin_suppress_tokens": 2,
    "task_specific_params": {"translation": "some_params"},
    "problem_type": "regression",
}
@is_staging_test
class __A ( unittest.TestCase ):
    """Hub round-trip tests: push configs to the staging Hub and reload them.

    NOTE(review): all setup/test methods below were mangled to the same name
    (`SCREAMING_SNAKE_CASE__`), so only the last survives; upstream they are
    `setUpClass`, `tearDownClass` and distinct `test_*` methods. Locals
    (`_lowerCAmelCase`) and call arguments (`_snake_case`) are mangled too.
    """

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls ):
        # Authenticate against the staging Hub once for the whole class.
        _lowerCAmelCase : Optional[Any] = TOKEN
        HfFolder.save_token(_snake_case )

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls ):
        # Best-effort cleanup of every repo the tests may have created.
        try:
            delete_repo(token=cls._token , repo_id="test-config" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="test-dynamic-config" )
        except HTTPError:
            pass

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Push a tiny BertConfig to a user repo, reload it and compare fields.
        _lowerCAmelCase : Optional[Any] = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub("test-config" , use_auth_token=self._token )
        _lowerCAmelCase : List[Any] = BertConfig.from_pretrained(F"""{USER}/test-config""" )
        # transformers_version legitimately differs between push and reload.
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(_snake_case , getattr(_snake_case , _snake_case ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="test-config" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(_snake_case , repo_id="test-config" , push_to_hub=_snake_case , use_auth_token=self._token )
        _lowerCAmelCase : Union[str, Any] = BertConfig.from_pretrained(F"""{USER}/test-config""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(_snake_case , getattr(_snake_case , _snake_case ) )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Same round-trip, but into an organization repo.
        _lowerCAmelCase : int = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
        _lowerCAmelCase : Tuple = BertConfig.from_pretrained("valid_org/test-config-org" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(_snake_case , getattr(_snake_case , _snake_case ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                _snake_case , repo_id="valid_org/test-config-org" , push_to_hub=_snake_case , use_auth_token=self._token )
        _lowerCAmelCase : Any = BertConfig.from_pretrained("valid_org/test-config-org" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(_snake_case , getattr(_snake_case , _snake_case ) )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Dynamic (custom-code) config round-trip through the Hub.
        CustomConfig.register_for_auto_class()
        _lowerCAmelCase : Optional[Any] = CustomConfig(attribute=42 )
        config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
        _lowerCAmelCase : Any = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=_snake_case )
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
        self.assertEqual(new_config.attribute , 42 )
class __A ( unittest.TestCase ):
    """Unit tests for `PretrainedConfig` utilities (update_from_string,
    subfolder loading, offline fallback, versioned config files).

    NOTE(review): every test method below was mangled to the same name
    (`SCREAMING_SNAKE_CASE__`), so only the last is collected; upstream they
    are distinct `test_*` methods. Locals (`_lowerCAmelCase`) and arguments
    (`_snake_case`) are mangled references as well.
    """

    def SCREAMING_SNAKE_CASE__ ( self ):
        _lowerCAmelCase : Union[str, Any] = GPTaConfig()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        _lowerCAmelCase : Tuple = c.n_embd + 1 # int
        _lowerCAmelCase : Dict = c.resid_pdrop + 1.0 # float
        _lowerCAmelCase : Dict = not c.scale_attn_weights # bool
        _lowerCAmelCase : int = c.summary_type + "foo" # str
        c.update_from_string(
            F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
        self.assertEqual(_snake_case , c.n_embd , "mismatch for key: n_embd" )
        self.assertEqual(_snake_case , c.resid_pdrop , "mismatch for key: resid_pdrop" )
        self.assertEqual(_snake_case , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
        self.assertEqual(_snake_case , c.summary_type , "mismatch for key: summary_type" )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # config_common_kwargs must exercise every common kwarg with a
        # non-default value.
        _lowerCAmelCase : Dict = PretrainedConfig()
        _lowerCAmelCase : int = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to addin config_common_kwargs above.
        self.assertListEqual(
            _snake_case , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
        _lowerCAmelCase : List[str] = [key for key, value in config_common_kwargs.items() if value == getattr(_snake_case , _snake_case )]
        if len(_snake_case ) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                F""" {', '.join(_snake_case )}.""" )

    def SCREAMING_SNAKE_CASE__ ( self ):
        with self.assertRaises(_snake_case ):
            # config is in subfolder, the following should not work without specifying the subfolder
            _lowerCAmelCase : Optional[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
        _lowerCAmelCase : Optional[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
        self.assertIsNotNone(_snake_case )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # A mock response for an HTTP head request to emulate server down
        _lowerCAmelCase : Tuple = mock.Mock()
        _lowerCAmelCase : Any = 500
        _lowerCAmelCase : Any = {}
        _lowerCAmelCase : Any = HTTPError
        _lowerCAmelCase : List[str] = {}
        # Download this model to make sure it's in the cache.
        _lowerCAmelCase : Optional[int] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request" , return_value=_snake_case ) as mock_head:
            _lowerCAmelCase : Optional[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
            # This check we did call the fake head request
            mock_head.assert_called()

    def SCREAMING_SNAKE_CASE__ ( self ):
        # This test is for deprecated behavior and can be removed in v5
        _lowerCAmelCase : Any = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Versioned config files: the newest file <= current version wins.
        _lowerCAmelCase : List[str] = AutoConfig.from_pretrained("bert-base-cased" )
        _lowerCAmelCase : Tuple = ["config.4.0.0.json"]
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(_snake_case )
            _lowerCAmelCase : Optional[int] = 2
            json.dump(configuration.to_dict() , open(os.path.join(_snake_case , "config.4.0.0.json" ) , "w" ) )
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            _lowerCAmelCase : int = AutoConfig.from_pretrained(_snake_case )
            self.assertEqual(new_configuration.hidden_size , 2 )
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            _lowerCAmelCase : List[Any] = ["config.42.0.0.json"]
            _lowerCAmelCase : str = 768
            configuration.save_pretrained(_snake_case )
            shutil.move(os.path.join(_snake_case , "config.4.0.0.json" ) , os.path.join(_snake_case , "config.42.0.0.json" ) )
            _lowerCAmelCase : str = AutoConfig.from_pretrained(_snake_case )
            self.assertEqual(new_configuration.hidden_size , 768 )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        _lowerCAmelCase : List[Any] = "hf-internal-testing/test-two-configs"
        import transformers as new_transformers

        _lowerCAmelCase : str = "v4.0.0"
        _lowerCAmelCase , _lowerCAmelCase : Optional[int] = new_transformers.models.auto.AutoConfig.from_pretrained(
            _snake_case , return_unused_kwargs=_snake_case )
        self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` ia not kept in the kwargs by mistake.
        self.assertDictEqual(_snake_case , {} )
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        _lowerCAmelCase : Union[str, Any] = "v3.0.0"
        _lowerCAmelCase : Any = old_transformers.models.auto.AutoConfig.from_pretrained(_snake_case )
        self.assertEqual(old_configuration.hidden_size , 768 )
| 587 | 1 |
'''simple docstring'''
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , )-> List[Any]:
if attention_mask is None:
__UpperCAmelCase = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__UpperCAmelCase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__UpperCAmelCase = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=__UpperCamelCase )
if decoder_head_mask is None:
__UpperCAmelCase = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=__UpperCamelCase )
if cross_attn_head_mask is None:
__UpperCAmelCase = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=__UpperCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class UpperCAmelCase :
    """Builds tiny M2M100 configs/inputs and checks shared model behaviours.

    NOTE(review): `__init__`'s parameters were all mangled to `__A`
    (a SyntaxError) and the instance attributes/locals to `__UpperCAmelCase`;
    the assignment order below shows the intended parameter order
    (parent, batch_size, seq_length, is_training, use_labels, vocab_size, ...).
    The helper methods were all mangled to the same name (`__lowerCamelCase`);
    upstream they are `prepare_config_and_inputs`, `get_config`,
    `prepare_config_and_inputs_for_common`,
    `create_and_check_decoder_model_past_large_inputs` and
    `check_encoder_decoder_model_standalone`.
    """

    def __init__(self , __A , __A=13 , __A=7 , __A=True , __A=False , __A=99 , __A=16 , __A=2 , __A=4 , __A=4 , __A="relu" , __A=0.1 , __A=0.1 , __A=0.0 , __A=0.0 , __A=20 , __A=2 , __A=1 , __A=0 , ):
        __UpperCAmelCase = parent
        __UpperCAmelCase = batch_size
        __UpperCAmelCase = seq_length
        __UpperCAmelCase = is_training
        __UpperCAmelCase = use_labels
        __UpperCAmelCase = vocab_size
        __UpperCAmelCase = hidden_size
        __UpperCAmelCase = num_hidden_layers
        __UpperCAmelCase = num_attention_heads
        __UpperCAmelCase = intermediate_size
        __UpperCAmelCase = hidden_act
        __UpperCAmelCase = hidden_dropout_prob
        __UpperCAmelCase = attention_probs_dropout_prob
        __UpperCAmelCase = encoder_layerdrop
        __UpperCAmelCase = decoder_layerdrop
        __UpperCAmelCase = max_position_embeddings
        __UpperCAmelCase = eos_token_id
        __UpperCAmelCase = pad_token_id
        __UpperCAmelCase = bos_token_id

    def __lowerCamelCase ( self ):
        # Random encoder/decoder ids clamped away from the pad token.
        __UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __UpperCAmelCase = self.eos_token_id # Eos Token
        __UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        __UpperCAmelCase = input_ids.clamp(self.pad_token_id + 1 )
        __UpperCAmelCase = decoder_input_ids.clamp(self.pad_token_id + 1 )
        __UpperCAmelCase = self.get_config()
        __UpperCAmelCase = prepare_mam_aaa_inputs_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        return config, inputs_dict

    def __lowerCamelCase ( self ):
        # Tiny config mirroring the tester's hyper-parameters.
        return MaMaaaConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )

    def __lowerCamelCase ( self ):
        __UpperCAmelCase , __UpperCAmelCase = self.prepare_config_and_inputs()
        return config, inputs_dict

    def __lowerCamelCase ( self , __A , __A ):
        # Decoder with cached past must match the no-past forward on new tokens.
        __UpperCAmelCase = MaMaaaModel(config=SCREAMING_SNAKE_CASE_ ).get_decoder().to(SCREAMING_SNAKE_CASE_ ).eval()
        __UpperCAmelCase = inputs_dict['input_ids']
        __UpperCAmelCase = inputs_dict['attention_mask']
        __UpperCAmelCase = inputs_dict['head_mask']
        # first forward pass
        __UpperCAmelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )
        __UpperCAmelCase , __UpperCAmelCase = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        __UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
        __UpperCAmelCase = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and
        __UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
        __UpperCAmelCase = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
        __UpperCAmelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )['last_hidden_state']
        __UpperCAmelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ )[
            'last_hidden_state'
        ]
        # select random slice
        __UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        __UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
        __UpperCAmelCase = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-2 ) )

    def __lowerCamelCase ( self , __A , __A ):
        # Standalone encoder/decoder saved+reloaded must reproduce the full model.
        __UpperCAmelCase = MaMaaaModel(config=SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ).eval()
        __UpperCAmelCase = model(**SCREAMING_SNAKE_CASE_ )
        __UpperCAmelCase = outputs.encoder_last_hidden_state
        __UpperCAmelCase = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            __UpperCAmelCase = model.get_encoder()
            encoder.save_pretrained(SCREAMING_SNAKE_CASE_ )
            __UpperCAmelCase = MaMaaaEncoder.from_pretrained(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
        __UpperCAmelCase = encoder(inputs_dict['input_ids'] , attention_mask=inputs_dict['attention_mask'] )[
            0
        ]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
        with tempfile.TemporaryDirectory() as tmpdirname:
            __UpperCAmelCase = model.get_decoder()
            decoder.save_pretrained(SCREAMING_SNAKE_CASE_ )
            __UpperCAmelCase = MaMaaaDecoder.from_pretrained(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
        __UpperCAmelCase = decoder(
            input_ids=inputs_dict['decoder_input_ids'] , attention_mask=inputs_dict['decoder_attention_mask'] , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=inputs_dict['attention_mask'] , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
    """Common-test harness for MaMaaa (M2M100) models.

    NOTE(review): the three mixin base classes are mangled to the same name
    (`UpperCAmelCase__`); upstream they are ModelTesterMixin,
    GenerationTesterMixin and PipelineTesterMixin. All methods below were
    mangled to `__lowerCamelCase`; upstream they are
    `is_pipeline_test_to_skip`, `setUp` and distinct `test_*` methods, so as
    written only the last definition survives.
    """

    # Model classes exercised by the shared tests.
    _A : Any = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    _A : List[Any] = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    # Pipeline-task -> model-class mapping for pipeline tests.
    _A : List[str] = (
        {
            """conversational""": MaMaaaForConditionalGeneration,
            """feature-extraction""": MaMaaaModel,
            """summarization""": MaMaaaForConditionalGeneration,
            """text2text-generation""": MaMaaaForConditionalGeneration,
            """translation""": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    _A : Union[str, Any] = True
    _A : Union[str, Any] = True
    _A : int = False
    _A : Optional[Any] = False

    def __lowerCamelCase ( self , __A , __A , __A , __A , __A ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True
        return False

    def __lowerCamelCase ( self ):
        __UpperCAmelCase = MaMaaaModelTester(self )
        __UpperCAmelCase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ )

    def __lowerCamelCase ( self ):
        self.config_tester.run_common_tests()

    def __lowerCamelCase ( self ):
        # save_pretrained/from_pretrained round-trip must report no missing keys.
        __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            __UpperCAmelCase = model_class(SCREAMING_SNAKE_CASE_ )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(SCREAMING_SNAKE_CASE_ )
                __UpperCAmelCase , __UpperCAmelCase = model_class.from_pretrained(SCREAMING_SNAKE_CASE_ , output_loading_info=SCREAMING_SNAKE_CASE_ )
            self.assertEqual(info['missing_keys'] , [] )

    def __lowerCamelCase ( self ):
        __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE_ )

    def __lowerCamelCase ( self ):
        __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*SCREAMING_SNAKE_CASE_ )

    def __lowerCamelCase ( self ):
        # Forward pass with inputs_embeds instead of input_ids must work.
        __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            __UpperCAmelCase = model_class(SCREAMING_SNAKE_CASE_ )
            model.to(SCREAMING_SNAKE_CASE_ )
            model.eval()
            __UpperCAmelCase = copy.deepcopy(self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
            if not self.is_encoder_decoder:
                __UpperCAmelCase = inputs['input_ids']
                del inputs["input_ids"]
            else:
                __UpperCAmelCase = inputs['input_ids']
                __UpperCAmelCase = inputs.get('decoder_input_ids' , SCREAMING_SNAKE_CASE_ )
                del inputs["input_ids"]
                inputs.pop('decoder_input_ids' , SCREAMING_SNAKE_CASE_ )
            __UpperCAmelCase = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                __UpperCAmelCase = wte(SCREAMING_SNAKE_CASE_ )
            else:
                __UpperCAmelCase = wte(SCREAMING_SNAKE_CASE_ )
                __UpperCAmelCase = wte(SCREAMING_SNAKE_CASE_ )
            with torch.no_grad():
                model(**SCREAMING_SNAKE_CASE_ )[0]

    def __lowerCamelCase ( self ):
        # fp16 generation smoke test (only meaningful on CUDA).
        __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        __UpperCAmelCase = input_dict['input_ids']
        __UpperCAmelCase = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE_ )
        __UpperCAmelCase = MaMaaaForConditionalGeneration(SCREAMING_SNAKE_CASE_ ).eval().to(SCREAMING_SNAKE_CASE_ )
        if torch_device == "cuda":
            model.half()
        model.generate(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )
        model.generate(num_beams=4 , do_sample=SCREAMING_SNAKE_CASE_ , early_stopping=SCREAMING_SNAKE_CASE_ , num_return_sequences=3 )
def _lowerCAmelCase(tok_lst):
    """Build a `torch.long` tensor of token ids on the global test device.

    Fixes the mangled body, which used the same undefined name for both the
    data and the device arguments.

    NOTE(review): `torch_device` is the module-level device normally imported
    from `transformers.testing_utils`; that import is mangled in this file's
    header — confirm and restore it.
    """
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
_A: str = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class UpperCAmelCase ( unittest.TestCase ):
    """Slow integration tests for M2M100 ("MaMaaa") against facebook/m2m100_418M.

    NOTE(review): the obfuscation left several names undefined in this chunk —
    ``SCREAMING_SNAKE_CASE_`` (stands in for torch_device, tensors, booleans
    and the tolerance constant depending on the call site), ``model``,
    ``output``, ``dct``, ``generated``, ``expected_en`` and
    ``hypotheses_batch`` (every assignment targets ``__UpperCAmelCase``), and
    the helpers ``prepare_mam_aaa_inputs_dict`` / ``_long_tensor`` must come
    from earlier in the file. All three test methods also share the single
    name ``__lowerCamelCase``, so only the last definition survives on the
    class. Confirm all of this against the upstream test file.
    """

    @cached_property
    def __lowerCamelCase (self ):
        # Tokenizer fixture shared by the tests below.
        return MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' )

    def __lowerCamelCase (self ):
        # Base model: forward a fixed encoder/decoder input pair and compare
        # the (1, 11, 1024) output's top-left 3x3 corner to hard-coded values.
        __UpperCAmelCase = MaMaaaModel.from_pretrained('facebook/m2m100_418M' ).to(SCREAMING_SNAKE_CASE_ )
        __UpperCAmelCase = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] )
        __UpperCAmelCase = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] )
        __UpperCAmelCase = prepare_mam_aaa_inputs_dict(model.config , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        with torch.no_grad():
            __UpperCAmelCase = model(**SCREAMING_SNAKE_CASE_ )[0]
        __UpperCAmelCase = torch.Size((1, 11, 1_024) )
        self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
        # change to expected output here
        __UpperCAmelCase = torch.tensor(
            [[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=SCREAMING_SNAKE_CASE_ )
        self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ ) )

    def __lowerCamelCase (self ):
        # LM head: same fixed inputs, but the output is logits over the
        # vocabulary; again compared to a hard-coded 3x3 corner.
        __UpperCAmelCase = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(SCREAMING_SNAKE_CASE_ )
        # change to intended input
        __UpperCAmelCase = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] )
        __UpperCAmelCase = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] )
        __UpperCAmelCase = prepare_mam_aaa_inputs_dict(model.config , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        with torch.no_grad():
            __UpperCAmelCase = model(**SCREAMING_SNAKE_CASE_ )[0]
        __UpperCAmelCase = torch.Size((1, 11, model.config.vocab_size) )
        self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
        # change to expected output here
        __UpperCAmelCase = torch.tensor(
            [[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=SCREAMING_SNAKE_CASE_ )
        self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ ) )

    def __lowerCamelCase (self ):
        # End-to-end fr->en translation with 5-beam search; decoded hypotheses
        # must equal the reference translations exactly.
        __UpperCAmelCase = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(SCREAMING_SNAKE_CASE_ )
        __UpperCAmelCase = MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' , src_lang='fr' , tgt_lang='en' )
        __UpperCAmelCase = [
            'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
            'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
            'Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'
            ' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'
            ' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.',
        ]
        # The below article tests that we don't add any hypotheses outside of the top n_beams
        __UpperCAmelCase = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
        __UpperCAmelCase = model.generate(
            input_ids=dct['input_ids'].to(SCREAMING_SNAKE_CASE_ ) , attention_mask=dct['attention_mask'].to(SCREAMING_SNAKE_CASE_ ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('en' ) , )
        __UpperCAmelCase = [
            'The NSA case highlights the total absence of intelligence debate',
            'I think there are two levels of response from the French government.',
            'When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'
            ' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'
            ' communications in France.',
        ]
        __UpperCAmelCase = tokenizer.batch_decode(
            hypotheses_batch.tolist() , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
        assert generated == expected_en
| 126 |
"""Convert locally trained "diffuser" (Janner et al.) hopper-medium-v2
checkpoints into on-disk diffusers UNet model repos.

Fixes over the obfuscated original: the two converters are named to match the
calls in the ``__main__`` block (``unet`` / ``value_function``), the horizon
parameter is actually declared and used (``hor``), and the intermediate
results (``model``, ``state_dict``, ``config``, ``hf_value_function``,
``mapping``) are bound to real names instead of undefined placeholders. The
bogus ``List[str]`` / ``Tuple`` return annotations (``typing`` was never
imported) are dropped.
"""
import json
import os

import torch

from diffusers import UNetaDModel

os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)


def unet(hor):
    """Convert the temporal U-Net checkpoint for horizon ``hor`` (32 or 128)."""
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    else:
        # The original fell through with unbound locals for any other horizon.
        raise ValueError(f"unsupported horizon: {hor}")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNetaDModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    # NOTE(review): this assumes both checkpoints enumerate parameters in the
    # same order, so a positional zip of the key sets gives the rename map.
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)


def value_function():
    """Convert the value-function checkpoint (horizon 32)."""
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    # NOTE(review): unlike unet(), the loaded object is used directly as the
    # state dict (no .state_dict() call), matching the original lines —
    # confirm the checkpoint really stores a raw state dict.
    state_dict = model
    hf_value_function = UNetaDModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)


if __name__ == "__main__":
    unet(32)
    # unet(128)
    value_function()
| 42 | 0 |
"""simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
# Test data for the PokerHand suite below.
#
# Fixes over the obfuscated original: every constant was named ``a_`` (each
# assignment silently overwriting the previous one) and every function was
# named ``SCREAMING_SNAKE_CASE__``, so the ``parametrize`` decorators bound to
# the wrong objects and ``SORTED_HANDS`` was undefined. Names are restored
# from the references the code itself makes.

# Hands ordered weakest -> strongest, so index comparison yields the expected
# compare_with() outcome in generate_random_hand().
SORTED_HANDS = (
    "4S 3H 2C 7S 5H",
    "9D 8H 2C 6S 7H",
    "2D 6D 9D TH 7D",
    "TC 8C 2S JH 6C",
    "JH 8S TH AH QH",
    "TS KS 5S 9S AC",
    "KD 6S 9D TH AD",
    "KS 8D 4D 9S 4S",  # pair
    "8C 4S KH JS 4D",  # pair
    "QH 8H KD JH 8S",  # pair
    "KC 4H KS 2H 8D",  # pair
    "KD 4S KC 3H 8S",  # pair
    "AH 8S AS KC JH",  # pair
    "3H 4C 4H 3S 2H",  # 2 pairs
    "5S 5D 2C KH KH",  # 2 pairs
    "3C KH 5D 5S KH",  # 2 pairs
    "AS 3C KH AD KH",  # 2 pairs
    "7C 7S 3S 7H 5S",  # 3 of a kind
    "7C 7S KH 2H 7H",  # 3 of a kind
    "AC KH QH AH AS",  # 3 of a kind
    "2H 4D 3C AS 5S",  # straight (low ace)
    "3C 5C 4C 2C 6H",  # straight
    "6S 8S 7S 5H 9H",  # straight
    "JS QS 9H TS KH",  # straight
    "QC KH TS JS AH",  # straight (high ace)
    "8C 9C 5C 3C TC",  # flush
    "3S 8S 9S 5S KS",  # flush
    "4C 5C 9C 8C KC",  # flush
    "JH 8H AH KH QH",  # flush
    "3D 2H 3H 2C 2D",  # full house
    "2H 2C 3S 3H 3D",  # full house
    "KH KC 3S 3H 3D",  # full house
    "JC 6H JS JD JH",  # 4 of a kind
    "JC 7H JS JD JH",  # 4 of a kind
    "JC KH JS JD JH",  # 4 of a kind
    "2S AS 4S 5S 3S",  # straight flush (low ace)
    "2D 6D 3D 4D 5D",  # straight flush
    "5C 6C 3C 7C 4C",  # straight flush
    "JH 9H TH KH QH",  # straight flush
    "JH AH TH KH QH",  # royal flush (high ace straight flush)
)

# (hand, other, expected compare_with() result)
TEST_COMPARE = (
    ("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
    ("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
    ("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
    ("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
    ("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
    ("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
    ("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
    ("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
    ("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
    ("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
    ("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
    ("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
    ("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
    ("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
    ("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
    ("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
    ("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
    ("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
    ("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
    ("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
    ("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
    ("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
    ("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
    ("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
    ("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
    ("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
    ("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
    ("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
    ("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
    ("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
    ("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
    ("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
    ("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)

TEST_FLUSH = (
    ("2H 3H 4H 5H 6H", True),
    ("AS AH 2H AD AC", False),
    ("2H 3H 5H 6H 7H", True),
    ("KS AS TS QS JS", True),
    ("8H 9H QS JS TH", False),
    ("AS 3S 4S 8S 2S", True),
)

TEST_STRAIGHT = (
    ("2H 3H 4H 5H 6H", True),
    ("AS AH 2H AD AC", False),
    ("2H 3H 5H 6H 7H", False),
    ("KS AS TS QS JS", True),
    ("8H 9H QS JS TH", True),
)

TEST_FIVE_HIGH_STRAIGHT = (
    ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
    ("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
    ("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
    ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)

TEST_KIND = (
    ("JH AH TH KH QH", 0),
    ("JH 9H TH KH QH", 0),
    ("JC KH JS JD JH", 7),
    ("KH KC 3S 3H 3D", 6),
    ("8C 9C 5C 3C TC", 0),
    ("JS QS 9H TS KH", 0),
    ("7C 7S KH 2H 7H", 3),
    ("3C KH 5D 5S KH", 2),
    ("QH 8H KD JH 8S", 1),
    ("2D 6D 9D TH 7D", 0),
)

TEST_TYPES = (
    ("JH AH TH KH QH", 23),
    ("JH 9H TH KH QH", 22),
    ("JC KH JS JD JH", 21),
    ("KH KC 3S 3H 3D", 20),
    ("8C 9C 5C 3C TC", 19),
    ("JS QS 9H TS KH", 18),
    ("7C 7S KH 2H 7H", 17),
    ("3C KH 5D 5S KH", 16),
    ("QH 8H KD JH 8S", 15),
    ("2D 6D 9D TH 7D", 14),
)


def generate_random_hand():
    """Pick two random hands from SORTED_HANDS and derive the expected outcome
    from their relative strength (index order)."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    """Yield ``number_of_hands`` random (hand, other, expected) triples."""
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    # Shuffling and re-sorting PokerHands must reproduce SORTED_HANDS order.
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # _is_five_high_straight() must be idempotent and not corrupt card values.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Project Euler problem 54: player 1 wins 376 of the hands in the file.
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 708 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
a_ = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(a_)
class __lowercase ( PretrainedConfig ):
    """Composite configuration holding a question-encoder config and a
    generator config for RAG models (full parameter docs are prepended from
    the module-level docstring via ``add_start_docstrings``).

    Fixes over the obfuscated original: the decorator argument and base class
    were undefined names (``_UpperCAmelCase``); ``__init__`` declared every
    parameter with the same name (a SyntaxError) and referenced ``kwargs``
    that was never bound; the two class attributes shared one name; the
    sub-configs and hyper-parameters were assigned to throwaway locals
    instead of ``self``; and both methods shared one name, so the alternate
    constructor was shadowed by the serializer.
    """

    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        # Imported lazily to avoid a circular import with the auto-config module.
        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        # Fall back to the generator's forced EOS token when none was given.
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        """Instantiate from two sub-configs, serializing them to dicts first."""
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested sub-configs to plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 48 | 0 |
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
A = get_tests_dir("""fixtures""")
class a__ ( unittest.TestCase ):
    """Loading edge cases for image processors: cached files used when the Hub
    is unreachable, loading from a config URL, and subfolder handling.

    Fixes over the obfuscated original: the mock response was never actually
    configured (five assignments all rebound one placeholder local) and the
    ``mock.patch``/``assertRaises`` arguments were the undefined name
    ``UpperCamelCase_``.
    """

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock HTTP response emulating "server down" (status 500 whose
        # raise_for_status raises HTTPError).
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_image_processor_from_url(self):
        # Loading directly from a remote preprocessor_config.json should work.
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        # NOTE(review): OSError is presumed as the expected exception here
        # (the original argument was an undefined placeholder) — confirm.
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)
@is_staging_test
class a__ ( unittest.TestCase ):
    """Staging-endpoint tests for pushing image processors to the Hub.

    Fixes over the obfuscated original: the class-level hooks were named
    ``a_`` so unittest never ran them (restored to ``setUpClass`` /
    ``tearDownClass``, grounded by the ``cls._token`` writes and the cleanup
    ``delete_repo`` calls), the test methods all shared one name, and several
    arguments were the undefined placeholder ``UpperCamelCase_`` (restored to
    the module-level fixtures dir ``A``, the saved ``TOKEN``, and ``True``
    for ``push_to_hub``/``trust_remote_code``).
    """

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup of any repos the tests created.
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(A)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)
        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )
        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(A)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)
        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )
        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(A)
        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
| 77 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class a__ ( PipelineTesterMixin , unittest.TestCase ):
    """Fast (CPU, dummy-weight) tests for ShapEPipeline.

    Fixes over the obfuscated original: the mixin base was the undefined name
    ``__magic_name__`` (``PipelineTesterMixin`` is imported above but was
    never used); every class attribute was named ``lowercase_`` and every
    property/method ``a_``, so later definitions silently shadowed earlier
    ones and the self-references (``self.dummy_prior``,
    ``self.pipeline_class``, ``self.batch_params`` ...) could not resolve;
    and the inputs helper declared two parameters with the same name (a
    SyntaxError). Restored names are grounded in those self-references.

    NOTE(review): a few boolean/None arguments below were obfuscated
    placeholders; ``use_karras_sigmas=True``, ``clip_sample=True`` and
    ``set_progress_bar_config(disable=None)`` are presumed — confirm against
    the upstream test file.
    """

    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # MPS does not support device-bound generators.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    """Slow, GPU-only end-to-end test against the released openai/shap-e checkpoint.

    The original class had both methods named ``a_`` (the second shadowed the
    first, so ``tearDown`` never existed) and called ``pipe.to`` /
    ``assert_mean_pixel_difference`` with the undefined ``UpperCamelCase_``.
    """

    def tearDown(self):
        # Free VRAM between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 77 | 1 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections import Counter
def _UpperCamelCase ( __A ) -> typing.Counter[int]:
'''simple docstring'''
UpperCamelCase__ = Counter()
for base in range(1 , max_perimeter + 1 ):
for perpendicular in range(__A , max_perimeter + 1 ):
UpperCamelCase__ = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(__A ):
UpperCamelCase__ = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def _UpperCamelCase ( __A = 1000 ) -> int:
'''simple docstring'''
UpperCamelCase__ = pythagorean_triple(__A )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(F"""Perimeter {solution()} has maximum solutions""")
| 223 |
"""Lazy-import module definition for the OWL-ViT model family.

Fixes in this restoration: the import-structure dict was assigned to a junk
name (``a__``) while ``_LazyModule`` was handed the undefined
``_import_structure``; the optional-dependency branches then *rebound* that
junk name to plain lists instead of extending the dict; and the constructed
``_LazyModule`` was never installed into ``sys.modules``, so lazy attribute
access could never work.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Maps submodule name -> public names exported from it.
_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Vision extras: extend (not replace) the import structure.
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]


if TYPE_CHECKING:
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 223 | 1 |
"""Project Euler problem 86: find the least cuboid size M such that the number
of cuboids (up to M x M x M) with an integer shortest surface path exceeds
one million.
"""
from math import sqrt


def A__(__lowerCAmelCase: int = 100_0000) -> int:
    """Return the smallest ``M`` whose cumulative cuboid count exceeds the limit.

    Fix: the original computed ``min(__lowerCAmelCase, sum_shortest_sides // 2)``,
    i.e. it clamped against the *limit* instead of the current cuboid size, so
    the per-hypotenuse solution count was wrong.
    """
    limit = __lowerCAmelCase
    num_cuboids = 0
    max_cuboid_size = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        # The two shorter sides of the unrolled path sum to `sum_shortest_sides`;
        # the longest side is `max_cuboid_size`.
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                # Count distinct (a, b) splits with a <= b <= max_cuboid_size.
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


# Canonical alias so the script entry point below resolves.
solution = A__

if __name__ == "__main__":
    print(f"{solution() = }")
| 50 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 610 | 0 |
"""Project Euler problem 207: find the smallest m for which the proportion of
perfect partitions P(m) first falls below a given fraction (default 1/12345).
"""
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """Return True when the partition for ``positive_integer`` is perfect,
    i.e. when t = (sqrt(4m + 1) + 1) / 2 is an exact power of two.

    Fixes: the original called the non-existent ``math.loga`` and compared
    the exponent against ``int(positive_integer)`` instead of
    ``int(exponent)``.
    """
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12_345) -> int:
    """Return the smallest ``m`` whose perfect-partition proportion drops
    below ``max_proportion``.

    Candidates are m = (i**2 - 1) / 4 for integers i >= 3 (those are exactly
    the m for which 4m + 1 is a perfect square).
    """
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # If the candidate is an integer, then there is a partition for it.
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0 and perfect_partitions / total_partitions < max_proportion:
                return partition_candidate
        integer += 1


# Backward-compatible alias: the module's last public binding under the old
# (obfuscated) name was the solver function.
_a = solution

if __name__ == "__main__":
    print(f"{solution() = }")
| 716 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    """ConfigTester specialization asserting SegFormer-specific attributes.

    Restored name and base class: the obfuscated version subclassed the
    undefined ``_UpperCAmelCase`` and never bound the config it constructed,
    then read the undefined ``__UpperCAmelCase`` in every assertion. The test
    suite below instantiates ``SegformerConfigTester`` by this name.
    """

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    """Builds tiny SegFormer configs and inputs for the unit tests below.

    Restoration notes: the obfuscated ``__init__`` declared eighteen
    parameters all named ``__UpperCAmelCase`` (a SyntaxError) and bound every
    value to a local instead of ``self``, so no attribute read elsewhere in
    this file could ever succeed; every method likewise discarded its
    results into junk locals.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # Last stage downsamples by downsampling_rates[-1] * 2.
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        # Binary task: labels in {0} only (randint upper bound is exclusive).
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for SegFormer.

    Restoration notes: the obfuscated version bound ``setUp``'s testers to a
    junk local (so ``self.model_tester`` was never set), and every method
    discarded intermediate values into junk locals and then read undefined
    names (``__UpperCAmelCase``). Local names are restored from the canonical
    structure visible in the assertions.
    """

    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            # One attention map per transformer layer, summed over all blocks.
            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # Base models (no head) have no loss to train on.
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the fixture COCO test image used by the integration tests.

    Renamed from the obfuscated ``_a`` because the integration tests below
    call it as ``prepare_img()``; the original also returned the undefined
    name ``image`` while binding the opened image to a junk local.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against released SegFormer checkpoints.

    Restoration notes: all three methods shared the obfuscated name
    ``UpperCAmelCase`` (so only the last survived class creation), and the
    image-processor flags / locals were undefined junk names. ``keep_ratio``,
    ``align`` and ``do_random_crop`` are disabled so the processor only
    resizes + normalizes, matching the pinned expected logits.
    """

    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        # Looser tolerance than the ADE test: this checkpoint is fp16-sensitive.
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        # With target_sizes the maps are rescaled to the requested size.
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        # Without target_sizes the maps keep the model's output resolution.
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 115 | 0 |
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

# Tensor framework used by the common tokenizer tests.
FRAMEWORK = "pt" if is_torch_available() else "tf"


@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for CamemBERT (slow + fast implementations).

    Restoration notes: the obfuscated version rebound all three module
    constants to one name and then read the undefined ``_A`` everywhere, and
    subclassed the undefined ``_lowerCamelCase`` instead of
    ``TokenizerTesterMixin``.
    """

    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)

    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
| 255 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
def A__ ( self ) -> Tuple:
'''simple docstring'''
_lowercase =tempfile.mkdtemp()
_lowercase =[
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'的',
'价',
'格',
'是',
'15',
'便',
'alex',
'##andra',
',',
'。',
'-',
't',
'shirt',
]
_lowercase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
_lowercase ={
'do_resize': True,
'size': {'height': 224, 'width': 224},
'do_center_crop': True,
'crop_size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.48145466, 0.4578275, 0.40821073],
'image_std': [0.26862954, 0.26130258, 0.27577711],
'do_convert_rgb': True,
}
_lowercase =os.path.join(self.tmpdirname , lowerCAmelCase )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(lowerCAmelCase , lowerCAmelCase )
def A__ ( self , **lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase )
def A__ ( self , **lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase )
def A__ ( self , **lowerCAmelCase ) -> Any:
'''simple docstring'''
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase )
def A__ ( self ) -> List[str]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def A__ ( self ) -> Tuple:
'''simple docstring'''
_lowercase =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_lowercase =[Image.fromarray(np.moveaxis(lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A__ ( self ) -> str:
'''simple docstring'''
_lowercase =self.get_tokenizer()
_lowercase =self.get_rust_tokenizer()
_lowercase =self.get_image_processor()
_lowercase =ChineseCLIPProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
_lowercase =ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase )
_lowercase =ChineseCLIPProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
_lowercase =ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , lowerCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCAmelCase )
self.assertIsInstance(processor_fast.image_processor , lowerCAmelCase )
def A__ ( self ) -> str:
'''simple docstring'''
_lowercase =ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowercase =self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' )
_lowercase =self.get_image_processor(do_normalize=lowerCAmelCase )
_lowercase =ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=lowerCAmelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase )
def A__ ( self ) -> List[str]:
'''simple docstring'''
_lowercase =self.get_image_processor()
_lowercase =self.get_tokenizer()
_lowercase =ChineseCLIPProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
_lowercase =self.prepare_image_inputs()
_lowercase =image_processor(lowerCAmelCase , return_tensors='np' )
_lowercase =processor(images=lowerCAmelCase , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def test_tokenizer(self) -> None:
    """Text routed through the processor must match the tokenizer's own
    encoding exactly."""
    image_processor = self.get_image_processor()
    tokenizer = self.get_tokenizer()
    processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

    input_str = 'Alexandra,T-shirt的价格是15便士。'

    encoded_processor = processor(text=input_str)
    encoded_tok = tokenizer(input_str)

    for key in encoded_tok.keys():
        self.assertListEqual(encoded_tok[key], encoded_processor[key])
def test_processor(self) -> None:
    """With both text and images the processor returns all four model
    inputs; with neither it must raise."""
    image_processor = self.get_image_processor()
    tokenizer = self.get_tokenizer()
    processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

    input_str = 'Alexandra,T-shirt的价格是15便士。'
    image_input = self.prepare_image_inputs()

    inputs = processor(text=input_str, images=image_input)
    self.assertListEqual(list(inputs.keys()), ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'])

    # test if it raises when no input is passed
    # NOTE(review): the expected exception type was mangled to
    # `lowerCAmelCase` in the original; `ValueError` is what the upstream
    # processor raises — confirm.
    with pytest.raises(ValueError):
        processor()
def test_tokenizer_decode(self) -> None:
    """`processor.batch_decode` must delegate to the tokenizer's
    `batch_decode`."""
    image_processor = self.get_image_processor()
    tokenizer = self.get_tokenizer()
    processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

    predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

    decoded_processor = processor.batch_decode(predicted_ids)
    decoded_tok = tokenizer.batch_decode(predicted_ids)

    self.assertListEqual(decoded_tok, decoded_processor)
def test_model_input_names(self) -> None:
    """The keys produced by the processor must match its declared
    `model_input_names`."""
    image_processor = self.get_image_processor()
    tokenizer = self.get_tokenizer()
    processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

    input_str = 'Alexandra,T-shirt的价格是15便士。'
    image_input = self.prepare_image_inputs()

    inputs = processor(text=input_str, images=image_input)
    self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 291 | 0 |
"""simple docstring"""
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _snake_case ( lowercase__ ):
_lowerCamelCase : int = []
if isinstance(lowercase__ , lowercase__ ):
for v in tree.values():
shapes.extend(_fetch_dims(lowercase__ ) )
elif isinstance(lowercase__ , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(lowercase__ ) )
elif isinstance(lowercase__ , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError('Not supported' )
return shapes
@torch.jit.ignore
def _snake_case ( lowercase__ , lowercase__ ):
_lowerCamelCase : List[Any] = []
for d in reversed(lowercase__ ):
idx.append(flat_idx % d )
_lowerCamelCase : Optional[Any] = flat_idx // d
return tuple(reversed(lowercase__ ) )
@torch.jit.ignore
def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , ):
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(lowercase__ ) -> None:
_lowerCamelCase : Any = True
for i in range(len(lowercase__ ) ):
_lowerCamelCase : Optional[int] = -1 * (i + 1)
l[reversed_idx] &= tally
_lowerCamelCase : List[str] = l[reversed_idx]
if start_edges is None:
_lowerCamelCase : Optional[Any] = [s == 0 for s in start]
reduce_edge_list(lowercase__ )
if end_edges is None:
_lowerCamelCase : Union[str, Any] = [e == (d - 1) for e, d in zip(lowercase__ , lowercase__ )]
reduce_edge_list(lowercase__ )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowercase__ ) == 0:
return [()]
elif len(lowercase__ ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
_lowerCamelCase : List[Tuple[slice, ...]] = []
_lowerCamelCase : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowercase__ , lowercase__ ):
if s == e:
path_list.append(slice(lowercase__ , s + 1 ) )
else:
break
_lowerCamelCase : Tuple[slice, ...] = tuple(lowercase__ )
_lowerCamelCase : Tuple = len(lowercase__ )
# start == end, and we're done
if divergence_idx == len(lowercase__ ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
_lowerCamelCase : Optional[int] = start[divergence_idx]
return tuple(
path + (slice(lowercase__ , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
_lowerCamelCase : Union[str, Any] = end[divergence_idx]
return tuple(
path + (slice(lowercase__ , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
_lowerCamelCase : int = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _chunk_slice(t, flat_start, flat_end, no_batch_dims):
    """Equivalent to
    ``t.reshape(-1, *t.shape[no_batch_dims:])[flat_start:flat_end]`` but
    without materializing a full reshaped copy of ``t``."""
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)

    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(layer, inputs, chunk_size, no_batch_dims, low_mem=False, _out=None, _add_into_out=False):
    """Apply ``layer`` to ``inputs`` in chunks over the flattened leading
    ``no_batch_dims`` batch dimensions.

    Args:
        layer: callable invoked as ``layer(**chunked_inputs)``.
        inputs: (nested) dict of tensors; leading dims are broadcast to a
            common batch shape.
        chunk_size: number of flattened batch elements per call.
        no_batch_dims: how many leading dims form the batch.
        low_mem: avoid materializing reshaped inputs (uses ``_chunk_slice``).
        _out: optional pre-allocated output structure to write into.
        _add_into_out: accumulate into ``_out`` instead of overwriting.

    Returns the layer output with the original batch dims restored.
    Raises ``ValueError`` when ``inputs`` is empty or the layer returns an
    unsupported type.
    """
    if not (len(inputs) > 0):
        raise ValueError('Must provide at least one input')

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t):
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t):
        # Size-1 leading dims were broadcast, not expanded: reuse as-is.
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):
            def assign(d1, d2) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError('Not supported')

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
    return out
class lowerCAmelCase__ :
    """Dynamically searches for the largest chunk size that runs without a
    RuntimeError (e.g. CUDA OOM) for a chunked callable, caching the result
    until the argument shapes change."""

    def __init__(self, max_chunk_size=512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size = None
        self.cached_arg_data = None

    def _determine_favorable_chunk_size(self, fn, args, min_chunk_size):
        """Binary-search the powers of two up to ``max_chunk_size`` for the
        largest chunk size at which ``fn(*args, chunk_size=c)`` succeeds."""
        logging.info('Tuning chunk size...')

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        # Nudge the top candidate so max_chunk_size itself is testable.
        candidates[-1] += 4

        def test_chunk_size(chunk_size) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1, ac2):
        """Return True iff two cached argument snapshots are structurally
        equal (recursing into lists, tuples and key-sorted dicts)."""
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn, args, min_chunk_size):
        """Return a chunk size for ``representative_fn``; re-tunes only when
        the (shape-normalized) arguments differ from the cached ones."""
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size)
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration file.
lowercase__ = logging.get_logger(__name__)
# NOTE(review): this immediately rebinds `lowercase__`, discarding the logger
# above — the two were presumably distinct names originally (logger vs. the
# pretrained-config archive map); confirm against upstream before use.
lowercase__ = {
    """facebook/deit-base-distilled-patch16-224""": (
        """https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"""
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}
class lowerCAmelCase__ ( lowercase ):
    """Configuration class for a DeiT model.

    Stores the architecture hyperparameters (hidden size, depth, attention
    heads, patch geometry, ...). Defaults correspond to the base
    224x224/patch-16 variant.
    """

    # Identifier used by the auto-config machinery.
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class lowerCAmelCase__ ( lowercase ):
    """ONNX export configuration for DeiT."""

    # Minimum torch version required for a correct ONNX export.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        """Model input names mapped to their dynamic-axis descriptions."""
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])

    @property
    def atol_for_validation(self):
        """Absolute tolerance used when validating the exported model."""
        return 1e-4
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
# Module-level logger; referenced as `logger` by the task classes below.
logger = logging.getLogger(__name__)
class A ( TokenClassificationTask ):
    """CoNLL-style NER task: whitespace-separated columns, one token per
    line, blank lines or ``-DOCSTART-`` separating sentences."""

    def __init__(self, label_idx=-1):
        # Column index that holds the label (last column for CoNLL NER).
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode):
        """Parse ``{mode}.txt`` in ``data_dir`` into ``InputExample``s."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer, test_input_reader, preds_list):
        """Copy the test file to ``writer``, appending one prediction per
        token; warns when a sentence exceeded the model's max length."""
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path):
        """Read labels from ``path`` (one per line, 'O' prepended if
        missing) or fall back to the CoNLL-2003 default label set."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class A ( A ):
    """Chunking task: reuses the NER-style reader above (hence the base
    class, which is the preceding task class in this module) but takes the
    label from the second-to-last column."""

    def __init__(self):
        super().__init__(label_idx=-2)

    def get_labels(self, path):
        """Read labels from ``path`` or fall back to the CoNLL-2000
        chunking label set."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class A ( TokenClassificationTask ):
    """POS-tagging task over CoNLL-U files (parsed with ``conllu``),
    using the universal POS tag (``upos``) as the label."""

    def read_examples_from_file(self, data_dir, mode):
        """Parse ``{mode}.txt`` (CoNLL-U format) into ``InputExample``s."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer, test_input_reader, preds_list):
        """Write each sentence as ``form (gold|pred)`` tokens, one sentence
        per line."""
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path):
        """Read labels from ``path`` or fall back to the 17 universal POS
        tags."""
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 48 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    """Map an original ViT-MAE checkpoint parameter name to its
    transformers ``ViTMAEForPreTraining`` equivalent.

    Replacements are applied in order, so decoder-specific patterns are
    handled before the generic encoder ones.
    """
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name
def convert_state_dict(orig_state_dict, config):
    """Rewrite a raw ViT-MAE state dict in place into transformers' naming
    scheme, splitting fused qkv matrices into separate query/key/value
    entries (rows ordered q, k, v)."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                # Decoder attention: slice sizes come from the decoder width.
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
            else:
                # Encoder attention: slice sizes come from the encoder width.
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            # Non-qkv parameters are just renamed.
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Download a ViT-MAE checkpoint, convert it to a transformers
    ``ViTMAEForPreTraining`` model, verify the logits on a sample image,
    and save model + image processor to ``pytorch_dump_folder_path``."""
    config = ViTMAEConfig()
    # Architecture size is inferred from the checkpoint URL.
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    image_processor = ViTMAEImageProcessor(size=config.image_size)

    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass (seeded: ViT-MAE samples a random mask)
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]])
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]])
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]])

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: convert a public MAE checkpoint URL to a local
    # transformers model directory.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 482 | 0 |
from sklearn.metrics import fa_score
import datasets
snake_case__ : Any = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
snake_case__ : Tuple = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. 
This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, 
references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
snake_case__ : Tuple = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SCREAMING_SNAKE_CASE_ (datasets.Metric):
    """F1 metric wrapping ``sklearn.metrics.f1_score`` (imported above as
    ``fa_score``). ``datasets.Metric`` dispatches to ``_info``/``_compute``,
    so those exact method names are required."""

    def _info(self):
        # Multilabel configs take sequences of labels; everything else
        # takes a single int label per example.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"])

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        """Return ``{"f1": score}``; a float for averaged scores, an array
        when ``average=None`` yields one score per class."""
        score = fa_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
        return {"f1": float(score) if score.size == 1 else score}
| 171 |
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    """Build and simulate a GHZ-style entangled state over ``qubits``
    qubits, returning the measurement counts of 1000 shots.

    Only the all-zeros and all-ones bitstrings should appear: measuring any
    one qubit collapses all the others to the same value.
    """
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # CX (CNOT) entangles qubit i with its predecessor. The original
        # code targeted qubit `qubits`, which is out of range.
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)
if __name__ == "__main__":
    # Demo: counts for a 3-qubit entangled state (expect only 000 and 111).
    print(F'Total count for various states are: {quantum_entanglement(3)}')
| 171 | 1 |
import baseaa
def baseaa_encode(string: str) -> bytes:
    """Encode a UTF-8 string as Base85 and return the raw encoded bytes."""
    import base64  # local import: stdlib codec providing b85encode

    return base64.b85encode(string.encode("utf-8"))
def baseaa_decode(encoded) -> str:
    """Decode Base85 input (str or bytes) back to a UTF-8 string."""
    import base64  # local import: stdlib codec providing b85decode

    return base64.b85decode(encoded).decode("utf-8")
if __name__ == "__main__":
    # Round-trip demo: encode then decode a sample string.
    test = 'Hello World!'
    encoded = baseaa_encode(test)
    print(encoded)

    decoded = baseaa_decode(encoded)
    print(decoded)
| 35 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
# Lazy-import boilerplate: map submodule name -> public symbols.
_import_structure = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}

if TYPE_CHECKING:
    from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35 | 1 |
'''simple docstring'''
def solution(n: int = 10_00) -> int:
    """Project Euler 57: count the first *n* expansions of the continued
    fraction for sqrt(2) whose numerator has more digits than the denominator.

    Each step maps (num, den) -> (num + 2*den, num + den), starting from 1/1.

    (Name restored: the ``__main__`` guard below calls ``solution``.)
    """
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        # BUG FIX: compare the NUMERATOR's digit count with the
        # DENOMINATOR's — the original compared the input against itself,
        # so nothing was ever counted.
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)


if __name__ == "__main__":
    print(f'{solution() = }')
| 330 | '''simple docstring'''
class Node:
    """A node of a doubly linked list: a payload plus previous/next links.

    (Name restored: the list implementation below instantiates ``Node``.)
    """

    def __init__(self, data, previous=None, next_node=None):
        # BUG FIX: the constructor must store its arguments on ``self`` —
        # the original assigned all three to one throwaway local.
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"""{self.data}"""

    def get_data(self):
        """Return the payload stored in this node."""
        return self.data

    def get_next(self):
        """Return the following node, or None at the tail."""
        return self.next

    def get_previous(self):
        """Return the preceding node, or None at the head.

        (The three accessors were all named ``a``, so only the last one
        survived, even though the list code below calls ``get_data`` /
        ``get_next`` / ``get_previous``.)
        """
        return self.previous
class LinkedListIterator:
    """Forward iterator over the data of a doubly linked list.

    (Name restored: ``__iter__`` of the list below returns
    ``LinkedListIterator(self.head)``.)
    """

    def __init__(self, head):
        # BUG FIX: remember the cursor on ``self`` — ``__next__`` reads
        # ``self.current``, which the original never set.
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        """Return the current node's data and advance; StopIteration at end.

        (Protocol name restored — the method had been renamed to ``a``,
        which broke ``for``-loops over the list.)
        """
        if not self.current:
            raise StopIteration
        value = self.current.get_data()
        self.current = self.current.get_next()
        return value
class _lowercase:
    """A doubly linked list tracking ``head`` and ``tail`` pointers.

    Relies on the ``Node`` and ``LinkedListIterator`` helpers defined above
    (their real names are grounded by the calls in this class).  All method
    names here had been collapsed to ``a``; they are restored from the
    internal call sites (``set_head``, ``insert_before_node``, ``get_node``,
    ``remove_node_pointers``, ...) and conventional list API names.
    """

    def __init__(self):
        # BUG FIX: head/tail must live on the instance — the original bound
        # them to a mangled local, so every method read undefined state.
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        """Data of the first node, or None when the list is empty."""
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        """Data of the last node, or None when the list is empty."""
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node) -> None:
        """Make *node* the new head (and tail too when the list was empty)."""
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node) -> None:
        """Append *node* as the new tail."""
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value) -> None:
        """Wrap *value* in a Node and append it to the list."""
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node, node_to_insert) -> None:
        """Splice *node_to_insert* immediately before *node*."""
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            # *node* was the head — the new node takes its place.
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self, node, node_to_insert) -> None:
        """Splice *node_to_insert* immediately after *node*."""
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            # *node* was the tail — the new node takes its place.
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position(self, position, value) -> None:
        """Insert *value* at 1-based *position*; appends when past the end."""
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item):
        """Return the first node whose data equals *item*.

        Raises:
            Exception: when no matching node exists (original message kept).
        """
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("""Node not found""")

    def delete_value(self, value) -> None:
        """Remove the first node holding *value*, fixing head/tail links."""
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node) -> None:
        """Unlink *node* from its neighbours and clear its own pointers."""
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.previous = None
        node.next = None

    def is_empty(self):
        return self.head is None
# NOTE(review): placeholder — this test function appears to have been
# stripped down to its docstring; restore the original assertions before
# relying on the doctest run below.
def UpperCamelCase_ ( ) -> None:
    '''simple docstring'''

if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 330 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCAmelCase : List[str] = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ['''MobileViTFeatureExtractor''']
__lowerCAmelCase : Optional[int] = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[int] = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 58 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    """ConfigTester variant asserting MobileNetV1-specific config fields.

    (Names restored: the test suite below instantiates
    ``MobileNetVaConfigTester`` and ``ConfigTester`` is imported at the top
    of the file; the original used mangled, undefined names.)
    """

    def __magic_name__(self):
        # NOTE(review): this method name also looks mangled — upstream the
        # hook is ``create_and_test_config_common_properties``; confirm
        # before relying on ConfigTester invoking it.
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    """Fabricates tiny MobileNetV1 configs and inputs for the unit tests.

    (Class and method names restored from their call sites in the test
    class below — e.g. ``self.model_tester.prepare_config_and_inputs()``;
    the original collapsed every method name to ``__magic_name__`` and
    every attribute assignment to one mangled local.)
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1_024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        # BUG FIX: every argument must be stored on ``self`` under its real
        # name — the attributes are read by the methods below.
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        # Final channel count scales with the depth multiplier.
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random pixel_values (plus labels when enabled) and a fresh config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        """Shape-check the backbone's final feature map."""
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        """Shape-check the classification head's logits."""
        # NOTE(review): restored as the upstream pattern
        # ``config.num_labels = self.num_labels`` — confirm against HF source.
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _lowercase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
{'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : str = False
SCREAMING_SNAKE_CASE__ : Tuple = False
def __magic_name__( self :Any ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = MobileNetVaModelTester(self )
__SCREAMING_SNAKE_CASE : Optional[int] = MobileNetVaConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
def __magic_name__( self :Dict ) -> Optional[Any]:
pass
@unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
def __magic_name__( self :List[Any] ) -> List[Any]:
pass
@unittest.skip(reason='''MobileNetV1 does not output attentions''' )
def __magic_name__( self :Any ) -> Dict:
pass
def __magic_name__( self :Any ) -> List[Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : Any = model_class(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE : Union[str, Any] = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__( self :Any ) -> Tuple:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] ) -> Tuple:
def check_hidden_states_output(lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Optional[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = outputs.hidden_states
__SCREAMING_SNAKE_CASE : Optional[int] = 26
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : str = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE : List[Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def __magic_name__( self :List[str] ) -> List[Any]:
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def prepare_img():
    """Load the shared COCO fixture image used by the integration tests.

    (Name restored: the integration test below calls ``prepare_img()``.)
    """
    # BUG FIX: the original bound the opened image to a mangled local and
    # then returned the undefined name ``image``.
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __magic_name__( self :Optional[int] ) -> Union[str, Any]:
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
)
@slow
def __magic_name__( self :Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[str] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.default_image_processor
__SCREAMING_SNAKE_CASE : int = prepare_img()
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE : int = model(**lowerCAmelCase__ )
# verify the logits
__SCREAMING_SNAKE_CASE : Any = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
| 696 | 0 |
"""BARTpho import shim: lazily expose the tokenizer when sentencepiece exists."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available

# BUG FIX: restore the ``_import_structure`` table — the original rebound a
# throwaway name twice and then passed the undefined ``_import_structure``
# to ``_LazyModule``.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 708 |
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
# BUG FIX: this constant is read as ``DATASETS_ON_HF_GCP`` by the helper
# below (and by the parameterized test class); the original bound it to an
# unrelated mangled name, leaving that reference undefined.
DATASETS_ON_HF_GCP = [
    {"dataset": "wikipedia", "config_name": "20220301.de"},
    {"dataset": "wikipedia", "config_name": "20220301.en"},
    {"dataset": "wikipedia", "config_name": "20220301.fr"},
    {"dataset": "wikipedia", "config_name": "20220301.frr"},
    {"dataset": "wikipedia", "config_name": "20220301.it"},
    {"dataset": "wikipedia", "config_name": "20220301.simple"},
    {"dataset": "snli", "config_name": "plain_text"},
    {"dataset": "eli5", "config_name": "LFQA_reddit"},
    {"dataset": "wiki40b", "config_name": "en"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
    {"dataset": "natural_questions", "config_name": "default"},
]


def list_datasets_on_hf_gcp_parameters(with_config: bool = True):
    """Build ``parameterized`` test cases from ``DATASETS_ON_HF_GCP``.

    (Function name restored from the decorator call in the test class
    below; BUG FIX: the parameter must be named ``with_config`` — the body
    reads that name, which the original left undefined.)

    Args:
        with_config: when True, one case per (dataset, config) pair; when
            False, one case per distinct dataset name.
    """
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=__a ) )
class _lowercase ( __a ):
_UpperCAmelCase = None
_UpperCAmelCase = None
def UpperCamelCase ( self , A__ , A__ ) -> str:
with TemporaryDirectory() as tmp_dir:
snake_case = dataset_module_factory(A__ , cache_dir=A__ )
snake_case = import_main_class(dataset_module.module_path , dataset=A__ )
snake_case = builder_cls(
cache_dir=A__ , config_name=A__ , hash=dataset_module.hash , )
snake_case = '''/'''.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=A__ ).replace(os.sep , '''/''' ),
config.DATASET_INFO_FILENAME,
] )
snake_case = cached_path(A__ , cache_dir=A__ )
self.assertTrue(os.path.exists(A__ ) )
@pytest.mark.integration
def __UpperCamelCase ( a : List[str] ) ->Any:
snake_case = tmp_path_factory.mktemp('''test_hf_gcp''' ) / '''test_wikipedia_simple'''
snake_case = dataset_module_factory('''wikipedia''' , cache_dir=a )
snake_case = import_main_class(dataset_module.module_path )
snake_case = builder_cls(
cache_dir=a , config_name='''20220301.frr''' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
snake_case = None
builder_instance.download_and_prepare()
snake_case = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def __UpperCamelCase ( a : Any ) ->Union[str, Any]:
snake_case = dataset_module_factory('''wikipedia''' , cache_dir=a )
snake_case = import_main_class(dataset_module.module_path , dataset=a )
snake_case = builder_cls(
cache_dir=a , config_name='''20220301.frr''' , hash=dataset_module.hash , )
snake_case = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(a , a )
assert "train" in ds
assert isinstance(ds['''train'''] , a )
assert next(iter(ds['''train'''] ) )
| 44 | 0 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase_ (__lowercase ):
    """CLIP-based image encoder producing conditioning states for Paint-by-Example."""

    def __init__(self, config, proj_size=7_68):
        super().__init__(config)
        # BUG FIX: every sub-module must be registered on ``self`` under the
        # name the forward pass reads — the original clobbered one mangled
        # local instead, so the module had no layers at all.
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # Learnable embedding used as the unconditional branch for
        # classifier-free guidance scaling.
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        # (Method name restored to ``forward`` so nn.Module.__call__ works.)
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        # Add a sequence dimension before the transformer mapper.
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states
class PaintByExampleMapper(nn.Module):
    """Small stack of transformer blocks mapping CLIP pooled output to
    conditioning hidden states.

    (Class name restored: the image encoder above instantiates
    ``PaintByExampleMapper``, which was otherwise undefined.)
    """

    def __init__(self, config):
        super().__init__()
        # Roughly one transformer block per five CLIP layers.
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        # NOTE(review): head count restored as 1 from the mangled literal —
        # confirm against the upstream diffusers implementation.
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    hid_size, num_heads, hid_size // num_heads, activation_fn="""gelu""", attention_bias=True
                )
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        # (Method name restored to ``forward`` so nn.Module.__call__ works.)
        for block in self.blocks:
            hidden_states = block(hidden_states)
        return hidden_states
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys

# BUG FIX: each pipeline step needs its own name — the original assigned
# every result to the same mangled identifier and then read the undefined
# originals (``fork_point_sha``, ``modified_files``, ``regex``, ...).
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

# Build one alternation over the requested top-level dirs and keep only
# modified .py files that live under them.
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
# No trailing newline: the output is consumed directly by Makefile commands.
print(" ".join(relevant_modified_files), end="")
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
SCREAMING_SNAKE_CASE_ = ''
SCREAMING_SNAKE_CASE_ = ''
SCREAMING_SNAKE_CASE_ = ''
SCREAMING_SNAKE_CASE_ = 1 # (0 is vertical, 1 is horizontal)
def __lowercase ( ) -> None:
"""simple docstring"""
__a , __a = get_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
print("""Processing...""" )
__a , __a , __a = update_image_and_anno(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for index, image in enumerate(__SCREAMING_SNAKE_CASE ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__a = random_chars(32 )
__a = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0]
__a = F'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
cva.imwrite(F'''/{file_root}.jpg''' , __SCREAMING_SNAKE_CASE , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F'''Success {index+1}/{len(__SCREAMING_SNAKE_CASE )} with {file_name}''' )
__a = []
for anno in new_annos[index]:
__a = F'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
annos_list.append(__SCREAMING_SNAKE_CASE )
with open(F'''/{file_root}.txt''' , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def __lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> tuple[list, list]:
"""simple docstring"""
__a = []
__a = []
for label_file in glob.glob(os.path.join(__SCREAMING_SNAKE_CASE , """*.txt""" ) ):
__a = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(__SCREAMING_SNAKE_CASE ) as in_file:
__a = in_file.readlines()
__a = os.path.join(__SCREAMING_SNAKE_CASE , F'''{label_name}.jpg''' )
__a = []
for obj_list in obj_lists:
__a = obj_list.rstrip("""\n""" ).split(""" """ )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(__SCREAMING_SNAKE_CASE )
labels.append(__SCREAMING_SNAKE_CASE )
return img_paths, labels
def __lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 1 ) -> tuple[list, list, list]:
"""simple docstring"""
__a = []
__a = []
__a = []
for idx in range(len(__SCREAMING_SNAKE_CASE ) ):
__a = []
__a = img_list[idx]
path_list.append(__SCREAMING_SNAKE_CASE )
__a = anno_list[idx]
__a = cva.imread(__SCREAMING_SNAKE_CASE )
if flip_type == 1:
__a = cva.flip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for bbox in img_annos:
__a = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
__a = cva.flip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for bbox in img_annos:
__a = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(__SCREAMING_SNAKE_CASE )
new_imgs_list.append(__SCREAMING_SNAKE_CASE )
return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char: int = 32) -> str:
    """Return a random lowercase-alphanumeric string of length *number_char*.

    (Name restored: ``main`` above calls ``random_chars(32)``; the original
    also clobbered the parameter and the alphabet into one mangled name.)
    """
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('DONE ✅')
| 704 |
'''simple docstring'''
from math import pi, sqrt
def gamma(num: float) -> float:
    """Recursively compute Γ(num) for positive integers and half-integers.

    (Name restored: the recursive call on the last line and the self-test
    below both use ``gamma``; the parameter name ``num`` is restored from
    the body, which read it while the signature declared a mangled name.)

    Raises:
        ValueError: for num <= 0.
        OverflowError: for num > 171.5 (Γ exceeds float range).
        NotImplementedError: for arguments that are not integer or half-integer.
    """
    if num <= 0:
        raise ValueError("""math domain error""")
    if num > 171.5:
        raise OverflowError("""math range error""")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("""num must be an integer or a half-integer""")
    elif num == 0.5:
        # BUG FIX: Γ(1/2) = sqrt(pi), not sqrt(num) — ``pi`` is imported
        # above and was otherwise unused.
        return sqrt(pi)
    else:
        # Γ(n) = (n-1)·Γ(n-1), with Γ(1) = 1.
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    """Sanity checks run by the doctest/test harness."""
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input('Gamma of: '))
        print(f"""gamma({num}) = {gamma(num)}""")
        print('\nEnter 0 to exit...')
| 201 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCAmelCase_ = {'''configuration_speech_encoder_decoder''': ['''SpeechEncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''SpeechEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''FlaxSpeechEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 338 |
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


# (Renamed from a mangled single letter: the classmethod below logs via
# ``logger``.)
logger = logging.get_logger(__name__)


class UpperCAmelCase_ (PretrainedConfig ):
    """Composite configuration wrapping an encoder config and a decoder config.

    ``kwargs`` passed to the constructor must contain ``encoder`` and
    ``decoder`` dicts (each with a ``model_type`` key); the two
    sub-configurations are instantiated via ``AutoConfig.for_model``.

    (Base class restored to ``PretrainedConfig`` — imported above and
    otherwise unused; the original inherited from an undefined name.)
    """

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        # BUG FIX: keep intermediate results in distinct locals and store
        # the sub-configs on ``self`` — the original clobbered one mangled
        # name, so ``self.encoder`` / ``self.decoder`` were never set.
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        """Build a composite config; forces the decoder into decoder mode.

        (Method name restored per the standard PretrainedConfig API — the
        original collapsed all method names to one mangled identifier.)
        """
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested encoder/decoder configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
# Module-level logger; read throughout main() (logger.warning / logger.info).
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    """Fine-tune and evaluate a token-classification model (NER/POS/...) on a CoNLL-formatted dataset.

    Returns:
        dict of evaluation metrics (empty when --do_eval is not set).
    """
    # See all possible arguments in src/transformers/training_args.py or by passing --help.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome.")

    # Resolve the task implementation (e.g. "NER") from the local `tasks` module.
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}")

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        # Turn logits into label strings, dropping positions carrying the CrossEntropy ignore index.
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        # NOTE(review): the module imports this metric as `fa_score`; seqeval's exported
        # name is `f1_score` — verify the import line at the top of the file.
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": fa_score(out_label_list, preds_list),
        }

    # Data collator: pad to multiples of 8 for tensor-core efficiency when fp16 is on.
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results
def _mp_fn(index):
    """Entry point for torch_xla's xla_spawn; the worker *index* argument is unused."""
    main()


if __name__ == "__main__":
    main()
| 710 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

# Canonical checkpoint -> hosted config file.
GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class lowercase(PretrainedConfig):
    """Configuration class for GPT-Neo models.

    Stores the hyper-parameters (layer count, hidden sizes, per-layer attention
    layout, dropout rates, ...) used to instantiate a GPT-Neo model.
    """

    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        # Flatten [[types, count], ...] into one attention type per layer.
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument.")

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        """Expand ``[[types, count], ...]`` into a flat per-layer list of attention types."""
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Re-implementation of ``torch.Tensor.unfold`` (ONNX-exportable).

    Returns windows of length *size*, taken every *step* elements along
    *dimension*, appended as a trailing axis — same layout as ``input.unfold``.
    """
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    # Start index of every window, truncated to the windows that fully fit.
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    # Fancy-index along `dimension` only; every other axis is taken whole.
    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    # Move the window axis to the end to match Tensor.unfold's output layout.
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)
def custom_get_block_length_and_num_windows(seq_length, window_size):
    """Return the largest divisor of *seq_length* strictly below *window_size*
    and the corresponding number of windows, as tensors (ONNX-exportable)."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class lowercase(OnnxConfigWithPast):
    """ONNX export configuration for GPT-Neo: dynamic input axes, dummy-input
    generation (including past key/values), and the default opset."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes of the exported graph; with past, the mask covers past + current tokens.
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(lowercase, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            # Extend the mask so past positions are attended to as well.
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)],
                dim=1,
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 187 | 0 |
'''simple docstring'''
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow T5 checkpoint into a saved PyTorch model.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        config_file: JSON file describing the T5 architecture.
        pytorch_dump_path: output directory for the converted model.
    """
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    SCREAMING_SNAKE_CASE(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 539 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure: submodule -> public names; heavy imports happen on first access.
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: only the configuration objects are exposed.
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 539 | 1 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase_ = logging.getLogger()
def snake_case ( A__ ,A__ ):
UpperCAmelCase_ : Optional[int] = "\n".join(A__ )
Path(A__ ).open("w" ).writelines(A__ )
lowerCamelCase_ = '''patrickvonplaten/t5-tiny-random'''
lowerCamelCase_ = '''sshleifer/bart-tiny-random'''
lowerCamelCase_ = '''sshleifer/tiny-mbart'''
lowerCamelCase_ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class UpperCamelCase_(TestCasePlus):
    """End-to-end smoke tests for run_eval.py / run_eval_search.py on tiny models."""

    def run_eval_tester(self, model):
        # Run run_eval on a one-article input file and check an output file appears.
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            # The search report must mention the grid header, the model and the winner,
            # plus the task-appropriate metric names; "Info" noise must be suppressed.
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
| 463 |
"""simple docstring"""
from itertools import count
def snake_case(min_block_length: int = 50) -> int:
    """Project Euler 115: smallest row length ``n`` for which the number of ways
    to fill the row with red blocks of length >= *min_block_length* (separated by
    at least one black square) first exceeds one million.

    ``fill_count_functions[n]`` counts the fillings of a row of length ``n``.
    """
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_00_00_00:
            break
    return n


if __name__ == "__main__":
    print(f"{snake_case() = }")
| 463 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCamelCase_(PipelineTesterMixin, unittest.TestCase):
    """Fast (CPU, tiny-model) tests for the Kandinsky inpainting pipeline."""

    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        # Zero out the top-left quadrant as the region to inpaint
        # NOTE(review): region reconstructed — confirm against the upstream test.
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        # Calling with return_dict=False must produce the same images as the dict output.
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test against the published Kandinsky 2.1 checkpoints."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy")

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        mask = np.ones((768, 768), dtype=np.float32)
        # Mask out the strip above the cat's head so the hat is inpainted there.
        # NOTE(review): region reconstructed — confirm against the upstream test.
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="").to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 75 |
from __future__ import annotations
def UpperCAmelCase_(nums_a: list[float], nums_b: list[float]) -> float:
    """Return the median of the combined elements of two (unsorted) number lists.

    Raises:
        IndexError: if both input lists are empty.
    """
    all_numbers = sorted(nums_a + nums_b)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        # Odd total count: the middle element is the median.
        return all_numbers[div]
    # Even total count: average the two middle elements.
    return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {UpperCAmelCase_(array_a, array_b)}")
| 188 | 0 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
_A = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase(TokenizerTesterMixin, unittest.TestCase):
    """Test suite for ``SpeechTaTokenizer`` backed by a character-level SentencePiece fixture."""

    # NOTE(review): attribute names follow the TokenizerTesterMixin contract
    # (tokenizer_class / test_rust_tokenizer / test_sentencepiece) — confirm
    # against the mixin in test_tokenization_common.
    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        """Build a fixture tokenizer, register <mask> and <ctc_blank>, and save it to tmpdir."""
        super().setUp()

        # We have a SentencePiece fixture for testing (``_A`` is the fixture path above).
        tokenizer = SpeechTaTokenizer(_A)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        """Return a (raw text, expected decoded text) pair for round-trip tests."""
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        """Encode/decode a known sample and return (decoded text, ids)."""
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_convert_token_and_id(self):
        """``<pad>`` maps to id 1 and back."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        # 79 base tokens; added tokens (<mask>, <ctc_blank>) are not counted here.
        self.assertEqual(self.get_tokenizer().vocab_size, 79)

    def test_add_tokens_tokenizer(self):
        """Adding regular and special tokens grows ``len(tokenizer)`` but not ``vocab_size``."""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_a = tokenizer.vocab_size
                all_size_a = len(tokenizer)

                self.assertNotEqual(vocab_size_a, 0)
                self.assertEqual(vocab_size, vocab_size_a)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_a, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_a = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_a = tokenizer.add_special_tokens(new_toks_a)
                vocab_size_a = tokenizer.vocab_size
                all_size_a_after = len(tokenizer)

                self.assertNotEqual(vocab_size_a, 0)
                self.assertEqual(vocab_size, vocab_size_a)
                self.assertEqual(added_toks_a, len(new_toks_a))
                self.assertEqual(all_size_a_after, all_size_a + len(new_toks_a))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    def test_pickle_subword_regularization_tokenizer(self):
        # Subword regularization is not supported by this tokenizer.
        pass

    def test_subword_regularization_tokenizer(self):
        # Subword regularization is not supported by this tokenizer.
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        # "92000" is out of vocabulary, so it round-trips back as "<unk>".
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
            "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
            "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
            "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
            "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
            "conditioning on both left and right context in all layers.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        # fmt: off
        expected_encoding = {
            "input_ids": [
                [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
                [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
            ],
            "attention_mask": [
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            ]
        }
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
| 703 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    """Integration tests comparing XLM-RoBERTa hidden states against reference values.

    NOTE(review): renamed from ``__UpperCAmelCase`` to avoid clobbering the
    identically-named tokenizer test class earlier in this module.
    """

    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 7_68))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 10_24))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
import math
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list[int]:
_lowercase : int = []
_lowercase : Optional[Any] = 2
_lowercase : Union[str, Any] = int(math.sqrt(SCREAMING_SNAKE_CASE ) ) # Size of every segment
_lowercase : Union[str, Any] = [True] * (end + 1)
_lowercase : List[str] = []
while start <= end:
if temp[start] is True:
in_prime.append(SCREAMING_SNAKE_CASE )
for i in range(start * start , end + 1 , SCREAMING_SNAKE_CASE ):
_lowercase : Tuple = False
start += 1
prime += in_prime
_lowercase : int = end + 1
_lowercase : List[str] = min(2 * end , SCREAMING_SNAKE_CASE )
while low <= n:
_lowercase : str = [True] * (high - low + 1)
for each in in_prime:
_lowercase : Dict = math.floor(low / each ) * each
if t < low:
t += each
for j in range(SCREAMING_SNAKE_CASE , high + 1 , SCREAMING_SNAKE_CASE ):
_lowercase : Optional[Any] = False
for j in range(len(SCREAMING_SNAKE_CASE ) ):
if temp[j] is True:
prime.append(j + low )
_lowercase : Tuple = high + 1
_lowercase : Any = min(high + end , SCREAMING_SNAKE_CASE )
return prime
if __name__ == "__main__":
    # ``__magic_name__`` is the segmented sieve defined above (the original
    # line called an undefined name ``sieve`` at import time).
    print(__magic_name__(10**6))
| 66 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A : Optional[Any] = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class A(TokenizerTesterMixin, unittest.TestCase):
    """Tests for the PEGASUS tokenizer (slow and fast) on a no-BOS SentencePiece fixture."""

    # ``tokenizer_class``/``rust_tokenizer_class`` are read by the test body below;
    # NOTE(review): the two boolean flags follow the TokenizerTesterMixin naming — confirm.
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing.  Recomputed locally because
        # the module-level fixture variable ``A`` is shadowed by this class's name.
        sample_vocab = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
        tokenizer = PegasusTokenizer(sample_vocab)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        """``</s>`` maps to id 1 and back."""
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        """Slow and fast tokenizers must agree on mask/unk/pad handling."""
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_6103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_seqaseq_max_length(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for the BigBird-PEGASUS tokenizer variant (offset=0, ``[MASK]`` mask token).

    NOTE(review): renamed from ``A`` so it no longer clobbers the PEGASUS test
    class of the same name defined above.
    """

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing (recomputed locally; the
        # module-level fixture variable ``A`` is shadowed by the class above).
        sample_vocab = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
        tokenizer = PegasusTokenizer(sample_vocab, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        """Slow and fast tokenizers must agree on mask/unk/pad handling."""
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_seqaseq_max_length(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        """Pin the ids produced for a reference string (matches the original TF tokenizer)."""
        raw_input_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(raw_input_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1],
        )
| 15 | 0 |
def A__(__A):
    """Print and return the length of the longest path in a DAG.

    Args:
        __A: adjacency mapping {vertex: [out-neighbours]}; vertices are the
            integers 0..len(__A)-1.

    Returns:
        The maximum number of vertices on any path (a lone vertex counts as 1).
        The value is also printed, preserving the original behaviour.
    """
    graph = __A
    indegree = [0] * len(graph)
    queue = []
    # long_dist[v] = longest path (in vertices) ending at v found so far.
    long_dist = [1] * len(graph)

    # Count incoming edges of every vertex.
    for values in graph.values():
        for i in values:
            indegree[i] += 1

    # Seed the queue with all source vertices (Kahn's algorithm).
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)  # NOTE: O(V) pop; fine for small graphs
        for x in graph[vertex]:
            indegree[x] -= 1
            # Relax the edge vertex -> x.
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    result = max(long_dist)
    print(result)
    return result
# Adjacency list of Graph
# Adjacency list of the example DAG (vertex -> out-neighbours).
lowerCAmelCase = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
# ``A__`` is the longest-path routine defined above (the original line called
# an undefined name ``longest_distance``).
A__(lowerCAmelCase)
| 15 | import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast (dummy-component) tests for the DeepFloyd IF text-to-image pipeline."""

    # NOTE(review): attribute names follow the PipelineTesterMixin contract
    # (pipeline_class / params / batch_params / required_optional_params) — confirm.
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        # Provided by IFPipelineTesterMixin.
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic minimal pipeline inputs for the given device."""
        if str(device).startswith("mps"):
            # MPS generators cannot be device-pinned; fall back to the global seed.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_floataa(self):
        # Requires CUDA for half precision.
        super().test_save_load_floataa(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa)
_lowerCamelCase : Tuple = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase)
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""")
_lowerCamelCase , _lowerCamelCase : str = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""")
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_lowerCamelCase : str = None
_lowerCamelCase : str = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_lowerCamelCase : Optional[Any] = IFImgaImgPipeline(**pipe_a.components)
_lowerCamelCase : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_imgaimg(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_lowerCamelCase : Any = IFInpaintingPipeline(**pipe_a.components)
_lowerCamelCase : Dict = IFInpaintingSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_inpainting(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str) ->Tuple:
"""Stage I text-to-image then stage II super-resolution: verifies output shape, peak CUDA memory, and pixels against stored reference images."""
_start_torch_memory_measurement()
_lowerCamelCase : Optional[int] = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Optional[Any] = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
_lowerCamelCase : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCamelCase : Dict = torch.cuda.max_memory_allocated()
# stage I must stay under 13 GB of peak GPU memory
assert mem_bytes < 13 * 10**9
_lowerCamelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
# pipeline 2
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : str = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
_lowerCamelCase : Any = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCamelCase : Tuple = torch.cuda.max_memory_allocated()
# stage II must stay under 4 GB of peak GPU memory
assert mem_bytes < 4 * 10**9
_lowerCamelCase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : str , _UpperCamelCase : List[Any]) ->Any:
"""Image-to-image stage I then stage II super-resolution: verifies output shape, peak CUDA memory, and pixels against stored references."""
_start_torch_memory_measurement()
_lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Dict = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
_lowerCamelCase : Union[str, Any] = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
# img2img stage I must stay under 10 GB of peak GPU memory
assert mem_bytes < 10 * 10**9
_lowerCamelCase : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
# pipeline 2
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : List[str] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
_lowerCamelCase : List[Any] = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCamelCase : str = torch.cuda.max_memory_allocated()
# stage II must stay under 4 GB of peak GPU memory
assert mem_bytes < 4 * 10**9
_lowerCamelCase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple) ->Optional[int]:
"""Inpainting stage I (image + mask) then stage II super-resolution: verifies output shape, peak CUDA memory, and reference pixels."""
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(1)).to(_UpperCamelCase)
_lowerCamelCase : int = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Any = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
_lowerCamelCase : Any = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCamelCase : List[Any] = torch.cuda.max_memory_allocated()
# inpainting stage I must stay under 10 GB of peak GPU memory
assert mem_bytes < 10 * 10**9
_lowerCamelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
# pipeline 2
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1)).to(_UpperCamelCase)
_lowerCamelCase : List[str] = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
_lowerCamelCase : Optional[Any] = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
# stage II must stay under 4 GB of peak GPU memory
assert mem_bytes < 4 * 10**9
_lowerCamelCase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
def A__ ( ):
    """Reset the CUDA allocator bookkeeping so the next pipeline run is measured from a clean slate."""
    # Same three resets as before, expressed as a single pass over the hooks:
    # free cached blocks, then clear both peak-memory counters.
    for _reset_hook in (
        torch.cuda.empty_cache,
        torch.cuda.reset_max_memory_allocated,
        torch.cuda.reset_peak_memory_stats,
    ):
        _reset_hook()
| 15 | 1 |
import operator
def lowerCAmelCase_(arr, reverse=False, solution=None) -> list:
    """Sort ``arr`` with strand sort and return the sorted list.

    Repeatedly peels an already-ordered "strand" off ``arr`` and merges it
    into ``solution``, recursing until ``arr`` is exhausted.

    Fixes over the previous revision: the signature declared the same
    parameter name three times (a SyntaxError) and the body referenced the
    undefined names ``snake_case_`` and ``strand_sort``.

    Args:
        arr: values to sort; consumed (emptied) by the process.
        reverse: sort descending when True, ascending otherwise.
        solution: accumulator threaded through the recursive calls; callers
            normally omit it.

    Returns:
        The sorted list (``solution`` itself when one was supplied).
    """
    # For ascending order an item extends the strand when it is > its tail,
    # hence operator.gt; reversed order flips the comparator.
    _operator = operator.lt if reverse else operator.gt
    solution = solution if solution is not None else []
    if not arr:
        return solution
    # Peel one strand: scan the remaining items, moving each one that keeps
    # the strand ordered out of ``arr`` and onto the strand's tail.
    sublist = [arr.pop(0)]
    i = 0
    while i < len(arr):
        if _operator(arr[i], sublist[-1]):
            sublist.append(arr.pop(i))
        else:
            i += 1
    # Merge the strand into the solution accumulated so far.
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for index, existing in enumerate(solution):
                if not _operator(item, existing):
                    solution.insert(index, item)
                    break
            else:
                solution.append(item)
    # Recurse on whatever the strand left behind.
    lowerCAmelCase_(arr, reverse, solution)
    return solution
if __name__ == "__main__":
    # Fix: the sort routine defined in this file is named ``lowerCAmelCase_``;
    # the previous asserts called an undefined ``strand_sort`` (NameError).
    assert lowerCAmelCase_([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert lowerCAmelCase_([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 59 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
a_ = logging.get_logger(__name__)
class __snake_case ( SCREAMING_SNAKE_CASE__ ):
    """Deprecated alias for the PoolFormer image processor.

    Instantiating it emits a ``FutureWarning`` and otherwise behaves exactly
    like the parent processor.
    """

    def __init__(self, *args, **kwargs):
        """Warn about the deprecation, then defer to the parent constructor.

        Fixes over the previous revision: the signature reused one name for
        both ``*args`` and ``**kwargs`` (a SyntaxError), and the warning was
        handed the positional-args tuple as its category instead of
        ``FutureWarning``.
        """
        warnings.warn(
            '''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use PoolFormerImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 177 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class lowerCamelCase__ ( unittest.TestCase ):
"""Unit tests for the TF generation filtering utilities."""
def lowerCamelCase__ ( self : Optional[int] ):
'''tf_top_k_top_p_filtering with top_k=10, top_p=0.6, min_tokens_to_keep=4 must keep exactly the hand-annotated logits below and mask everything else to -inf.'''
__UpperCAmelCase : Any = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
], # cummulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
], # cummulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
__UpperCAmelCase : Union[str, Any] = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
__UpperCAmelCase : int = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non filtered values as noted above
__UpperCAmelCase : List[Any] = tf_top_k_top_p_filtering(_A , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
# surviving (non -inf) logits and their coordinates must match the tables above
__UpperCAmelCase : List[str] = output[output != -float("""inf""" )]
__UpperCAmelCase : int = tf.cast(
tf.where(tf.not_equal(_A , tf.constant(-float("""inf""" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(_A , _A , rtol=1e-1_2 )
tf.debugging.assert_equal(_A , _A )
@require_tf
class lowerCamelCase__ ( unittest.TestCase , snake_case__ ):
"""TF-specific generation integration tests; the framework-agnostic mixin is parameterized with TF classes/helpers below."""
if is_tf_available():
__a = {
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
@slow
def lowerCamelCase__ ( self : List[str] ):
'''Exported saved-model serving signature (fixed input length) must generate the same sequences as eager generate for every batch size.'''
__UpperCAmelCase : int = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__UpperCAmelCase : str = 2
__UpperCAmelCase : int = 2
class lowerCamelCase__ ( tf.Module ):
"""Wrapper exposing ``generate`` as a tf.function serving signature."""
def __init__( self : Any , UpperCamelCase : Any ):
'''Keep a handle on the wrapped generation model.'''
super(_A , self ).__init__()
__UpperCAmelCase : Any = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=_A , )
def lowerCamelCase__ ( self : int , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] ):
'''Serving entry point: run generate and expose the sequences tensor.'''
__UpperCAmelCase : Union[str, Any] = self.model.generate(
input_ids=_A , attention_mask=_A , max_new_tokens=_A , return_dict_in_generate=_A , )
return {"sequences": outputs["sequences"]}
__UpperCAmelCase : Optional[Any] = [[2, 0], [102, 103]]
__UpperCAmelCase : Tuple = [[1, 0], [1, 1]]
__UpperCAmelCase : List[Any] = DummyModel(model=_A )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_A , _A , signatures={"""serving_default""": dummy_model.serving} )
__UpperCAmelCase : Dict = tf.saved_model.load(_A ).signatures['serving_default']
for batch_size in range(1 , len(_A ) + 1 ):
__UpperCAmelCase : List[Any] = {
'input_ids': tf.constant(dummy_input_ids[:batch_size] ),
'attention_mask': tf.constant(dummy_attention_masks[:batch_size] ),
}
__UpperCAmelCase : str = serving_func(**_A )['sequences']
__UpperCAmelCase : List[Any] = test_model.generate(**_A , max_new_tokens=_A )
tf.debugging.assert_equal(_A , _A )
@slow
def lowerCamelCase__ ( self : int ):
'''Exported saved-model serving signature (fixed batch size, variable length) must match eager generate row by row.'''
__UpperCAmelCase : Optional[int] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__UpperCAmelCase : int = 1
__UpperCAmelCase : List[str] = 2
class lowerCamelCase__ ( tf.Module ):
"""Wrapper exposing ``generate`` as a tf.function serving signature."""
def __init__( self : Any , UpperCamelCase : List[Any] ):
'''Keep a handle on the wrapped generation model.'''
super(_A , self ).__init__()
__UpperCAmelCase : Tuple = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=_A , )
def lowerCamelCase__ ( self : int , UpperCamelCase : Any , UpperCamelCase : Union[str, Any] ):
'''Serving entry point: run generate and expose the sequences tensor.'''
__UpperCAmelCase : int = self.model.generate(
input_ids=_A , attention_mask=_A , max_new_tokens=_A , return_dict_in_generate=_A , )
return {"sequences": outputs["sequences"]}
__UpperCAmelCase : List[str] = [[2], [102, 103]]
__UpperCAmelCase : Optional[Any] = [[1], [1, 1]]
__UpperCAmelCase : List[str] = DummyModel(model=_A )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_A , _A , signatures={"""serving_default""": dummy_model.serving} )
__UpperCAmelCase : int = tf.saved_model.load(_A ).signatures['serving_default']
for input_row in range(len(_A ) ):
__UpperCAmelCase : str = {
'input_ids': tf.constant([dummy_input_ids[input_row]] ),
'attention_mask': tf.constant([dummy_attention_masks[input_row]] ),
}
__UpperCAmelCase : Tuple = serving_func(**_A )['sequences']
__UpperCAmelCase : Dict = test_model.generate(**_A , max_new_tokens=_A )
tf.debugging.assert_equal(_A , _A )
@slow
@require_tensorflow_text
def lowerCamelCase__ ( self : Dict ):
'''A Keras model that embeds a SentencePiece tokenizer plus generate end-to-end must be savable to disk.'''
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=_A )
class lowerCamelCase__ ( tf.keras.layers.Layer ):
"""Layer that tokenizes, generates and detokenizes in one call."""
def __init__( self : List[Any] ):
'''Load the SentencePiece tokenizer from disk and the tiny T5 model.'''
super().__init__()
__UpperCAmelCase : Any = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(_A , """spiece.model""" ) , """rb""" ).read() )
__UpperCAmelCase : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : Optional[Any] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Union[str, Any] ):
'''Tokenize, pad, generate and detokenize in a single layer call.'''
__UpperCAmelCase : Tuple = self.tokenizer.tokenize(_A )
__UpperCAmelCase : Optional[int] = text.pad_model_inputs(
_A , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
__UpperCAmelCase : str = self.model.generate(input_ids=_A , attention_mask=_A )
return self.tokenizer.detokenize(_A )
__UpperCAmelCase : int = CompleteSentenceTransformer()
__UpperCAmelCase : Any = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" )
__UpperCAmelCase : List[Any] = complete_model(_A )
__UpperCAmelCase : List[Any] = tf.keras.Model(_A , _A )
keras_model.save(_A )
def lowerCamelCase__ ( self : List[Any] ):
'''``eos_token_id`` (scalar or list) must deterministically stop sampling at the expected sequence length.'''
__UpperCAmelCase : Optional[int] = {
'do_sample': True,
'num_beams': 1,
'top_p': 0.7,
'top_k': 10,
'temperature': 0.7,
}
__UpperCAmelCase : Union[str, Any] = 14
__UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__UpperCAmelCase : Union[str, Any] = 'Hello, my dog is cute and'
__UpperCAmelCase : Optional[int] = tokenizer(_A , return_tensors="""tf""" )
__UpperCAmelCase : Dict = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__UpperCAmelCase : List[str] = 638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
__UpperCAmelCase : Union[str, Any] = model.generate(**_A , eos_token_id=_A , **_A )
self.assertTrue(expectation == len(generated_tokens[0] ) )
__UpperCAmelCase : List[str] = [638, 198]
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
__UpperCAmelCase : Dict = model.generate(**_A , eos_token_id=_A , **_A )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def lowerCamelCase__ ( self : List[str] ):
'''Extra model kwargs must flow through generate when the model accepts them, and raise when the encoder cannot filter them.'''
__UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
__UpperCAmelCase : Optional[Any] = 'Hugging Face is a technology company based in New York and Paris.'
__UpperCAmelCase : Optional[Any] = bart_tokenizer(_A , return_tensors="""tf""" ).input_ids
__UpperCAmelCase : List[Any] = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
__UpperCAmelCase : str = bart_model.generate(_A ).numpy()
class lowerCamelCase__ ( snake_case__ ):
"""BART subclass whose call accepts an extra kwarg, to confirm kwargs survive the generate path."""
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : Any=None , **UpperCamelCase : Optional[Any] ):
'''Pass-through override; the extra ``foo`` kwarg is accepted and ignored.'''
return super().call(_A , **_A )
__UpperCAmelCase : Any = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
__UpperCAmelCase : Tuple = bart_model.generate(_A , foo="""bar""" ).numpy()
self.assertTrue(np.array_equal(_A , _A ) )
class lowerCamelCase__ ( bart_model.model.encoder.__class__ ):
"""Encoder subclass accepting **kwargs, which defeats generate's input filtering."""
def lowerCamelCase__ ( self : int , UpperCamelCase : List[str] , **UpperCamelCase : Tuple ):
'''Pass-through override accepting arbitrary kwargs.'''
return super().call(_A , **_A )
__UpperCAmelCase : Union[str, Any] = FakeEncoder(bart_model.config , bart_model.model.shared )
__UpperCAmelCase : Any = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
__UpperCAmelCase : Tuple = bart_model.generate(_A ).numpy()
with self.assertRaises(_A ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(_A , foo="""bar""" )
| 711 |
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase__ :
"""Builder of small YolosConfig instances and matching dummy inputs for the model tests below."""
def __init__( self : str , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int]=13 , UpperCamelCase : Optional[Any]=[30, 30] , UpperCamelCase : Dict=2 , UpperCamelCase : Dict=3 , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Dict=True , UpperCamelCase : Tuple=32 , UpperCamelCase : List[Any]=5 , UpperCamelCase : List[Any]=4 , UpperCamelCase : Any=37 , UpperCamelCase : Optional[int]="gelu" , UpperCamelCase : str=0.1 , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : Optional[int]=10 , UpperCamelCase : Optional[int]=0.02 , UpperCamelCase : List[Any]=3 , UpperCamelCase : Optional[int]=None , UpperCamelCase : List[str]=8 , UpperCamelCase : Any=10 , ):
'''Store the test hyper-parameters and derive the expected sequence length.'''
__UpperCAmelCase : Optional[int] = parent
__UpperCAmelCase : List[Any] = batch_size
__UpperCAmelCase : int = image_size
__UpperCAmelCase : Optional[Any] = patch_size
__UpperCAmelCase : Union[str, Any] = num_channels
__UpperCAmelCase : List[str] = is_training
__UpperCAmelCase : int = use_labels
__UpperCAmelCase : Optional[int] = hidden_size
__UpperCAmelCase : Optional[Any] = num_hidden_layers
__UpperCAmelCase : Tuple = num_attention_heads
__UpperCAmelCase : Optional[int] = intermediate_size
__UpperCAmelCase : int = hidden_act
__UpperCAmelCase : Dict = hidden_dropout_prob
__UpperCAmelCase : str = attention_probs_dropout_prob
__UpperCAmelCase : Dict = type_sequence_label_size
__UpperCAmelCase : int = initializer_range
__UpperCAmelCase : str = num_labels
__UpperCAmelCase : Dict = scope
__UpperCAmelCase : str = n_targets
__UpperCAmelCase : Optional[int] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
__UpperCAmelCase : Optional[int] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
__UpperCAmelCase : List[Any] = num_patches + 1 + self.num_detection_tokens
def lowerCamelCase__ ( self : List[Any] ):
'''Build random pixel values, optional detection labels, and a config.'''
__UpperCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
__UpperCAmelCase : Optional[Any] = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
__UpperCAmelCase : Tuple = []
for i in range(self.batch_size ):
__UpperCAmelCase : Optional[int] = {}
__UpperCAmelCase : Optional[int] = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=UpperCamelCase )
__UpperCAmelCase : Tuple = torch.rand(self.n_targets , 4 , device=UpperCamelCase )
labels.append(UpperCamelCase )
__UpperCAmelCase : str = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : List[str] ):
'''Create a YolosConfig from the stored hyper-parameters.'''
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def lowerCamelCase__ ( self : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : Optional[Any] ):
'''Forward a YolosModel and check the last hidden state shape.'''
__UpperCAmelCase : str = YolosModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__UpperCAmelCase : str = model(UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple ):
'''Forward YolosForObjectDetection with and without labels; check logits, boxes and loss shapes.'''
__UpperCAmelCase : Optional[Any] = YolosForObjectDetection(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__UpperCAmelCase : str = model(pixel_values=UpperCamelCase )
__UpperCAmelCase : str = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
__UpperCAmelCase : Any = model(pixel_values=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''Repackage prepare_config_and_inputs into the (config, inputs_dict) shape the common tests expect.'''
__UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = config_and_inputs
__UpperCAmelCase : Union[str, Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( A , A , unittest.TestCase ):
"""Model/pipeline test suite for YOLOS, run through the common test mixins."""
__a = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
__a = (
{"""feature-extraction""": YolosModel, """object-detection""": YolosForObjectDetection} if is_torch_available() else {}
)
__a = False
__a = False
__a = False
__a = False
def lowerCamelCase__ ( self : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : int , UpperCamelCase : int=False ):
'''Extend the common input preparation with per-image detection labels when requested.'''
__UpperCAmelCase : int = super()._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
__UpperCAmelCase : Optional[Any] = []
for i in range(self.model_tester.batch_size ):
__UpperCAmelCase : List[Any] = {}
__UpperCAmelCase : Optional[int] = torch.ones(
size=(self.model_tester.n_targets,) , device=UpperCamelCase , dtype=torch.long )
__UpperCAmelCase : Union[str, Any] = torch.ones(
self.model_tester.n_targets , 4 , device=UpperCamelCase , dtype=torch.float )
labels.append(UpperCamelCase )
__UpperCAmelCase : List[Any] = labels
return inputs_dict
def lowerCamelCase__ ( self : str ):
'''Instantiate the model tester and the config tester.'''
__UpperCAmelCase : List[str] = YolosModelTester(self )
__UpperCAmelCase : Tuple = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 )
def lowerCamelCase__ ( self : List[Any] ):
'''Run the shared configuration checks.'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Optional[int] ):
'''Intentionally skipped: YOLOS takes pixel values, not input embeddings.'''
pass
def lowerCamelCase__ ( self : Optional[Any] ):
'''The input embedding module must be an nn.Module; output embeddings may be absent or linear.'''
__UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Dict = model_class(UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase , nn.Linear ) )
def lowerCamelCase__ ( self : Optional[Any] ):
'''The forward signature must start with ``pixel_values``.'''
__UpperCAmelCase ,__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : List[Any] = model_class(UpperCamelCase )
__UpperCAmelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : int = [*signature.parameters.keys()]
__UpperCAmelCase : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase )
def lowerCamelCase__ ( self : str ):
'''Forward the base model and validate output shapes.'''
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def lowerCamelCase__ ( self : Optional[int] ):
'''Attention outputs must have the expected count and (heads, seq, seq) shape, via kwargs and via config.'''
__UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Any = True
# in YOLOS, the seq_len is different
__UpperCAmelCase : List[str] = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
__UpperCAmelCase : Union[str, Any] = True
__UpperCAmelCase : Any = False
__UpperCAmelCase : Any = True
__UpperCAmelCase : List[str] = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : List[str] = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
__UpperCAmelCase : str = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__UpperCAmelCase : Dict = True
__UpperCAmelCase : str = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : List[Any] = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
__UpperCAmelCase : Union[str, Any] = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__UpperCAmelCase : Optional[int] = len(UpperCamelCase )
# Check attention is always last and order is fine
__UpperCAmelCase : Any = True
__UpperCAmelCase : Dict = True
__UpperCAmelCase : str = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Dict = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
__UpperCAmelCase : int = 1
self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase ) )
__UpperCAmelCase : Union[str, Any] = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def lowerCamelCase__ ( self : Dict ):
'''Hidden-state outputs must have the expected count and (seq, hidden) shape, via kwargs and via config.'''
def check_hidden_states_output(UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : int ):
__UpperCAmelCase : str = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
__UpperCAmelCase : str = outputs.hidden_states
__UpperCAmelCase : Tuple = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
# YOLOS has a different seq_length
__UpperCAmelCase : int = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__UpperCAmelCase ,__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Any = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Optional[int] = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def lowerCamelCase__ ( self : List[str] ):
'''Exercise the object-detection head through the model tester.'''
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*UpperCamelCase )
@slow
def lowerCamelCase__ ( self : Dict ):
'''Loading a pretrained checkpoint must succeed.'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Union[str, Any] = YolosModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def lowerCamelCase ( ) -> Optional[Any]:
    """Return the COCO cats fixture image shared by the integration test below."""
    # Single-expression form of the original load-then-return pair.
    return Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
@require_torch
@require_vision
class lowerCamelCase__ ( unittest.TestCase ):
"""Slow integration test running a pretrained YOLOS checkpoint on a real image."""
@cached_property
def lowerCamelCase__ ( self : int ):
'''Image processor for the yolos-small checkpoint, or None when vision deps are absent.'''
return AutoImageProcessor.from_pretrained("""hustvl/yolos-small""" ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self : int ):
'''Run detection inference and compare logits, boxes and post-processed detections against stored references.'''
__UpperCAmelCase : int = YolosForObjectDetection.from_pretrained("""hustvl/yolos-small""" ).to(UpperCamelCase )
__UpperCAmelCase : List[str] = self.default_image_processor
__UpperCAmelCase : Any = prepare_img()
__UpperCAmelCase : Union[str, Any] = image_processor(images=UpperCamelCase , return_tensors="""pt""" ).to(UpperCamelCase )
# forward pass
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(inputs.pixel_values )
# verify outputs
__UpperCAmelCase : Any = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
__UpperCAmelCase : List[str] = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=UpperCamelCase , )
__UpperCAmelCase : Union[str, Any] = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCamelCase , atol=1e-4 ) )
# verify postprocessing
__UpperCAmelCase : List[str] = image_processor.post_process_object_detection(
UpperCamelCase , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
__UpperCAmelCase : Optional[Any] = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(UpperCamelCase )
__UpperCAmelCase : Any = [75, 75, 17, 63, 17]
__UpperCAmelCase : Any = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(UpperCamelCase )
self.assertEqual(len(results["""scores"""] ) , 5 )
self.assertTrue(torch.allclose(results["""scores"""] , UpperCamelCase , atol=1e-4 ) )
self.assertSequenceEqual(results["""labels"""].tolist() , UpperCamelCase )
self.assertTrue(torch.allclose(results["""boxes"""][0, :] , UpperCamelCase ) )
| 299 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
    """Builds tiny ConvNext configs and inputs for the unit tests below.

    Fix: ``__init__`` declared one duplicated parameter name (a SyntaxError) and
    every assignment bound a throwaway local instead of an attribute, so
    ``self.batch_size`` etc. were never set. Methods are renamed to the names
    the test classes below actually call (``prepare_config_and_inputs`` …).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        # NOTE(review): mutable defaults mirror the upstream test helper; safe
        # here because the lists are never mutated in place.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for one small batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    """ConvNext model test suite.

    Fix: the class attributes all shadowed one another under a single name, the
    mixin base classes were undefined placeholders (both are imported at the
    top of the file), and no method carried the ``test_`` prefix required for
    unittest discovery. Names are restored to the ones the mixins and
    ``unittest`` expect.
    """

    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Intentionally a no-op: common config properties are covered elsewhere.
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img() -> "Image.Image":
    """Load the shared COCO fixture image.

    Fix: the original assigned the opened image to a throwaway name and
    returned the undefined ``image``. Renamed to ``prepare_img`` because that
    is the name the integration test below calls.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration test for facebook/convnext-tiny-224 classification.

    Fix: locals were bound to throwaway names and then read through undefined
    identifiers; the image-processor property is renamed to the
    ``default_image_processor`` name the test body reads.
    """

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase, BackboneTesterMixin ):
    """Backbone-specific checks for ConvNext, driven by ``BackboneTesterMixin``.

    Fix: the three class attributes shadowed one another under a single name
    and the mixin base was an undefined placeholder (it is imported at the top
    of the file); ``setUp`` now actually stores the tester on ``self``.
    """

    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
| 567 |
'''simple docstring'''
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class _UpperCamelCase ( A ):
    """Map-style dataset over CNN/DailyMail story files in a directory.

    Each item is ``(document_name, story_lines, summary_lines)``.

    Fix: ``__init__`` declared two parameters with the same name (a
    SyntaxError), ``self.documents`` was never initialized, and several locals
    were bound to a throwaway name and then read as undefined identifiers.
    """

    def __init__(self, path="", prefix="train"):
        """Index every story file directly under ``path`` (summary files skipped)."""
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        """Returns the number of indexed documents."""
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        # NOTE(review): splitting on "/" is not portable on Windows;
        # os.path.basename would be safer — confirm before changing data paths.
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            # NOTE(review): process_story is defined below under an obfuscated
            # name (_A); align the two names when de-obfuscating the file.
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines
def _A ( raw_story ):
    """Split a raw CNN/DM story into ``(story_lines, summary_lines)``.

    Summary lines are the ones following ``@highlight`` markers.

    Fix: every intermediate was rebound to one throwaway name while later
    statements read the intended identifiers (``nonempty_lines``, ``lines`` …).
    """
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    # NOTE(review): _add_missing_period is defined below under the obfuscated
    # name _A; align the names when de-obfuscating the file.
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines
def _A ( line ):
    """Return ``line`` terminated with a period unless it already ends with an
    end token or is an ``@highlight`` marker.

    Fix: ``END_TOKENS`` and ``line`` were undefined names at run time; the
    token list also repeated the single close-quote where upstream uses the
    double close-quote (``\\u201d``).
    """
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u201d", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def _A ( sequence , block_size , pad_token_id ):
    """Truncate ``sequence`` to ``block_size`` or right-pad it (in place) with
    ``pad_token_id`` until it reaches that length.

    Fix: the original declared three identically named parameters (a
    SyntaxError) and read undefined names in the body.
    """
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence
def _A ( sequence , pad_token_id ):
    """Return a 0/1 attention mask for ``sequence`` (0 at pad positions).

    Fix: duplicate parameter names (a SyntaxError) and ``mask`` being an
    undefined name at return time.
    """
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
def _A ( story_lines , summary_lines , tokenizer ):
    """Tokenize story and summary lines and flatten each into a single id list.

    Fix: duplicate parameter names (a SyntaxError); the per-line token lists
    are restored so the flattening comprehensions see the lists they iterate.
    """
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids
def _A ( batch , separator_token_id ):
    """Return alternating 0/1 segment ids per sentence for each sequence in
    ``batch``, toggling at every ``separator_token_id``.

    Fix: duplicate parameter names (a SyntaxError) and undefined locals
    (``sentence_num``, ``embeddings`` …).
    """
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
| 474 | 0 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def snake_case__ ( unknown_args ):
    """Parse a flat ``[--key, value, ...]`` list into a ``{key: value}`` dict.

    Fix: the parameter was named differently from the ``unknown_args`` the
    body reads, so the function raised NameError on every call.
    """
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
def snake_case__ ( ):
    """Entry point of the ``datasets-cli`` tool: register subcommands, parse
    the command line, and run the selected command.

    Fix: every intermediate was bound to a throwaway name while the following
    statements read the intended identifiers (``parser``, ``args`` …).
    """
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    # NOTE(review): the helper above is currently bound to the same obfuscated
    # name as this function; align it with ``parse_unknown_args`` when
    # de-obfuscating the file.
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
    # Fix: the guard called an undefined ``main``; the CLI entry point above is
    # (currently) bound to the obfuscated name ``snake_case__``.
    snake_case__()
| 111 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# NOTE(review): both constants are bound to the same name, so the second
# assignment shadows the first and only 32 survives — presumably these were
# the train/eval batch-size constants; confirm against the upstream example.
_a : List[Any] = 16
_a : Tuple = 32
def snake_case__ ( accelerator: "Accelerator", batch_size: int = 16 ):
    """Build GLUE/MRPC train and eval dataloaders with dynamic padding.

    Fix: both parameters shared one name (a SyntaxError) and all intermediates
    were bound to a throwaway name while later statements read the intended
    identifiers (``tokenizer``, ``datasets``, ``tokenized_datasets`` …).
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders. Training shuffles, evaluation does not.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    # NOTE(review): upstream rebinds the dataloader factory (get_dataloaders)
    # here; assigning the mock to `_a` never swaps the factory actually used
    # during training — confirm intent.
    _a : List[Any] = mocked_dataloaders # noqa: F811
def snake_case__ ( config , args ):
    """Train bert-base-cased on GLUE/MRPC with Accelerate + LocalSGD and
    report accuracy after each epoch.

    Fix: both parameters shared one name (a SyntaxError) and every
    intermediate was bound to a throwaway name while later statements read the
    intended identifiers (``accelerator``, ``model``, ``optimizer`` …).
    """
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    # NOTE(review): the dataloader factory above is currently bound to an
    # obfuscated name; align it with ``get_dataloaders`` when de-obfuscating.
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def snake_case__ ( ):
    """Parse CLI flags for the LocalSGD example and launch training.

    Fix: intermediates were bound to throwaway names, and the argparse
    ``type=`` arguments were undefined placeholders (restored to str/int).
    """
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    # NOTE(review): the trainer above is currently bound to an obfuscated name;
    # align it with ``training_function`` when de-obfuscating the file.
    training_function(config, args)
if __name__ == "__main__":
    # Fix: ``main`` is undefined in this file; the last-bound ``snake_case__``
    # above is the CLI entry point.
    snake_case__()
| 111 | 1 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def a_ ( ) -> None:
    """Run the TensorFlow benchmark CLI, translating deprecated ``--no_*`` flags.

    Fix: the parser was built around the undefined ``_A`` placeholder and every
    intermediate was bound to a throwaway name while later statements read the
    intended identifiers (``parser``, ``benchmark_args`` …).
    """
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        # NOTE(review): eval() of the argparse error text is inherited from the
        # original — brittle, but the input is local argv, not untrusted data.
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
            raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
    # Fix: ``main`` is undefined in this file; the entry point above is ``a_``.
    a_()
| 328 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : str = {
"""facebook/timesformer""": """https://huggingface.co/facebook/timesformer/resolve/main/config.json""",
}
class __SCREAMING_SNAKE_CASE( PretrainedConfig ):
    """Configuration class storing TimeSformer architecture hyper-parameters.

    Fix: ``__init__`` declared one duplicated parameter name (a SyntaxError),
    every hyper-parameter was assigned to a throwaway local instead of an
    instance attribute, and the base class was an undefined placeholder
    (``PretrainedConfig`` is imported at the top of the file).
    """

    # NOTE(review): upstream names this class attribute ``model_type``; the
    # obfuscated name is kept to avoid changing the visible interface.
    _UpperCAmelCase = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias

        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
| 328 | 1 |
"""simple docstring"""
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    """Count decompositions of ``needed_sum`` into sums of distinct natural
    numbers raised to ``power``, considering candidates >= ``current_number``.

    Returns the (restored) running sum and the updated solution count.

    Fix: the function recursed through the name ``backtrack`` while being
    defined under an obfuscated name, its parameters were all identically
    named (a SyntaxError), and locals were bound to throwaway names.
    """
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # Branch 1: include current_number ** power and recurse on the rest.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # Branch 2: skip current_number and try the next candidate.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def lowerCAmelCase ( needed_sum: int , power: int ) -> int:
    """Return how many ways ``needed_sum`` can be written as a sum of distinct
    natural numbers each raised to ``power``.

    Raises ValueError outside 1 <= needed_sum <= 1000 or 2 <= power <= 10.

    Fix: the original declared duplicate parameter names (a SyntaxError) and
    dispatched to the then-undefined ``backtrack``.
    """
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )

    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
    import doctest

    # Execute any doctest examples embedded in this module when run directly.
    doctest.testmod()
| 708 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :str = logging.get_logger(__name__)
__lowerCamelCase :Any = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : List[Any] ='''time_series_transformer'''
snake_case__ : List[Any] ={
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self: List[str] , __a: Optional[int] = None , __a: Optional[int] = None , __a: str = "student_t" , __a: str = "nll" , __a: int = 1 , __a: List[int] = [1, 2, 3, 4, 5, 6, 7] , __a: Optional[Union[str, bool]] = "mean" , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: Optional[List[int]] = None , __a: Optional[List[int]] = None , __a: int = 32 , __a: int = 32 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: bool = True , __a: str = "gelu" , __a: int = 64 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: int = 100 , __a: float = 0.02 , __a: Tuple=True , **__a: str , )-> Any:
# time series specific configuration
lowerCamelCase : str = prediction_length
lowerCamelCase : Optional[Any] = context_length or prediction_length
lowerCamelCase : Tuple = distribution_output
lowerCamelCase : Any = loss
lowerCamelCase : List[Any] = input_size
lowerCamelCase : int = num_time_features
lowerCamelCase : Dict = lags_sequence
lowerCamelCase : Optional[int] = scaling
lowerCamelCase : int = num_dynamic_real_features
lowerCamelCase : Tuple = num_static_real_features
lowerCamelCase : Any = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
lowerCamelCase : int = cardinality
else:
lowerCamelCase : Dict = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
lowerCamelCase : str = embedding_dimension
else:
lowerCamelCase : str = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCamelCase : Any = num_parallel_samples
# Transformer architecture configuration
lowerCamelCase : Any = input_size * len(__a ) + self._number_of_features
lowerCamelCase : List[str] = d_model
lowerCamelCase : Tuple = encoder_attention_heads
lowerCamelCase : Optional[int] = decoder_attention_heads
lowerCamelCase : Union[str, Any] = encoder_ffn_dim
lowerCamelCase : str = decoder_ffn_dim
lowerCamelCase : str = encoder_layers
lowerCamelCase : Any = decoder_layers
lowerCamelCase : Optional[int] = dropout
lowerCamelCase : List[str] = attention_dropout
lowerCamelCase : Tuple = activation_dropout
lowerCamelCase : Optional[int] = encoder_layerdrop
lowerCamelCase : int = decoder_layerdrop
lowerCamelCase : Optional[int] = activation_function
lowerCamelCase : Optional[Any] = init_std
lowerCamelCase : Optional[Any] = use_cache
super().__init__(is_encoder_decoder=__a , **__a )
@property
def a__(self: int) -> int:
    """Number of extra feature channels appended to each input step.

    Sum of the static-categorical embedding widths, the dynamic/time/static
    real-valued feature counts, and two scaling features per input channel.
    """
    embed_total = sum(self.embedding_dimension)
    real_total = (
        self.num_dynamic_real_features
        + self.num_time_features
        + self.num_static_real_features
    )
    # the log1p(abs(loc)) and log(scale) features
    scaling_total = self.input_size * 2
    return embed_total + real_total + scaling_total
| 42 | 0 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    """Return the total number of trainable parameters in *model*.

    Fixes: the obfuscated version was named ``SCREAMING_SNAKE_CASE__`` while the
    callback below calls ``count_trainable_parameters``, its lambda parameter was
    unused (the body read an undefined ``p``/``model``), and it materialised an
    intermediate list just to sum numpy products.

    Args:
        model: any object exposing ``parameters()`` (e.g. a ``torch.nn.Module``).

    Returns:
        int: sum of element counts over parameters with ``requires_grad=True``.
    """
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
__magic_name__ = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Build a ModelCheckpoint callback that tracks ``val_<metric>``.

    Fixes: the obfuscated def had duplicate parameter names (a SyntaxError),
    read an undefined ``metric``, and shared its name with the two sibling
    helpers (each def shadowing the last); a descriptive name is restored.

    Args:
        output_dir: directory where checkpoints are written.
        metric: one of ``"rouge2"``, ``"bleu"`` or ``"em"``.

    Returns:
        pytorch_lightning.callbacks.ModelCheckpoint

    Raises:
        NotImplementedError: for any unsupported *metric*.
    """
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    """Build an EarlyStopping callback monitoring ``val_<metric>``.

    Loss-like metrics are minimised, score-like metrics maximised.
    Fixes: duplicate parameter names (SyntaxError) and undefined ``metric`` /
    ``verbose`` values in the obfuscated version; ``verbose=True`` restored —
    TODO confirm against the upstream callback helpers.
    """
    mode = "min" if "loss" in metric else "max"
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode=mode,
        patience=patience,
        verbose=True,
    )
class _SCREAMING_SNAKE_CASE ( pl.Callback ):
    """Seq2seq logging callback: logs learning rates, writes test/validation
    results and generations to disk, and records parameter counts.

    NOTE(review): identifiers are machine-mangled — every hook is named ``A_``
    (later defs shadow earlier ones) and parameters share the duplicate name
    ``lowerCamelCase`` (a SyntaxError as written). Code kept byte-identical;
    only documentation added. A rename pass is needed before this can run.
    """

    def A_ ( self , lowerCamelCase , lowerCamelCase ):
        # Log the learning rate of every param group of the first optimizer.
        snake_case__ = {F"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lowerCamelCase )

    @rank_zero_only
    def A_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=True ):
        # Write callback metrics (and optionally generations) for a split.
        logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
        snake_case__ = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
        # Log results
        snake_case__ = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            snake_case__ = od / "test_results.txt"
            snake_case__ = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            snake_case__ = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
            snake_case__ = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
        results_file.parent.mkdir(exist_ok=lowerCamelCase )
        generations_file.parent.mkdir(exist_ok=lowerCamelCase )
        # Append one "key: value" line per scalar metric.
        with open(lowerCamelCase , "a+" ) as writer:
            for key in sorted(lowerCamelCase ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                snake_case__ = metrics[key]
                if isinstance(lowerCamelCase , torch.Tensor ):
                    snake_case__ = val.item()
                snake_case__ = F"""{key}: {val:.6f}\n"""
                writer.write(lowerCamelCase )
        if not save_generations:
            return
        if "preds" in metrics:
            snake_case__ = "\n".join(metrics["preds"] )
            generations_file.open("w+" ).write(lowerCamelCase )

    @rank_zero_only
    def A_ ( self , lowerCamelCase , lowerCamelCase ):
        # Log total and trainable parameter counts.
        try:
            snake_case__ = pl_module.model.model.num_parameters()
        except AttributeError:
            snake_case__ = pl_module.model.num_parameters()
        snake_case__ = count_trainable_parameters(lowerCamelCase )
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )

    @rank_zero_only
    def A_ ( self , lowerCamelCase , lowerCamelCase ):
        # Persist metrics to disk, then write the test logs.
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(lowerCamelCase , lowerCamelCase , "test" )

    @rank_zero_only
    def A_ ( self , lowerCamelCase , lowerCamelCase ):
        # Persist metrics only; validation generations intentionally not saved.
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 276 |
import os
from datetime import datetime as dt
from github import Github
# Issues carrying any of these labels are never auto-closed or marked stale.
# Fix: the stale-issue loop below reads LABELS_TO_EXEMPT, but the list was
# bound to the never-read obfuscated name __magic_name__.
LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]
def main():
    """Close or mark stale inactive issues on the huggingface/accelerate repo.

    Requires a ``GITHUB_TOKEN`` environment variable. For every open issue:
    close it when the bot left the latest comment, it has been inactive >7
    days and is >=30 days old; otherwise post a stale warning after 23 days
    of inactivity. Issues carrying any LABELS_TO_EXEMPT label are skipped.

    Fixes: the def was named ``SCREAMING_SNAKE_CASE__`` while the __main__
    guard calls ``main()``; the sort key lambda referenced an undefined ``i``;
    ``reverse=`` and the ``len()`` argument referenced an undefined name.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first, so comments[0] is the most recent one.
        comments = sorted(issue.get_comments(), key=lambda c: c.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
# Script entry point: only run the stale-issue sweep when executed directly.
if __name__ == "__main__":
    main()
| 276 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__lowerCAmelCase : Tuple = {
"configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
"tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any = [
"GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXJapaneseForCausalLM",
"GPTNeoXJapaneseLayer",
"GPTNeoXJapaneseModel",
"GPTNeoXJapanesePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 674 | """simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    """Copy *weight* (and optionally *bias*) into *torch_layer* in place.

    Fixes: the def was named ``_UpperCAmelCase`` while every call site in this
    script calls ``set_param``, and its duplicate parameter names were a
    SyntaxError; the body already read ``torch_layer``/``weight``/``bias``.

    Raises:
        AssertionError: if the tensor shapes do not match the layer's.
    """
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    """Load trax LSH-attention weights (query_key, value, output dense) into
    the corresponding torch layer.

    Fixes: def renamed to match its call site (``set_layer_weights_in_torch_lsh``)
    and duplicate/undefined parameter names restored to
    ``weights``/``torch_layer``/``hidden_size`` as read by the body.
    """
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    # trax stores per-head matrices; fold heads into the hidden dimension.
    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    """Load trax local-attention weights (query, key, value, output dense) into
    the corresponding torch layer.

    Fixes: def renamed to match its call site
    (``set_layer_weights_in_torch_local``) and duplicate/undefined parameter
    names restored to ``weights``/``torch_layer``/``hidden_size``.
    """
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    # trax stores per-head matrices; fold heads into the hidden dimension.
    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    """Load one trax Reformer block (layernorms, attention, feed-forward) into
    the matching torch block.

    Fixes: def renamed to match its call site (``set_block_weights_in_torch``);
    duplicate/undefined parameter names restored to the names the body reads.
    """
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output — LSH layers carry 3 weight tensors, local layers 4.
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward wraps the real weights one level deeper.
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """Load a full trax Reformer checkpoint (embeddings, blocks, output head)
    into a torch ``ReformerModelWithLMHead``.

    Fixes: def renamed to match its call site (``set_model_weights_in_torch``);
    duplicate/undefined parameter names restored. NOTE(review): the mangled
    ``isinstance(weights[3], <junk>)`` check is restored as ``tuple`` per the
    upstream conversion script — confirm against the original.
    """
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    # Each torch block consumes 4 consecutive trax layer entries.
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    out_norm_weight = np.asarray(weights[7][0])
    out_norm_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(out_norm_weight),
        torch.tensor(out_norm_bias),
    )

    # output embeddings
    out_embed_weight = np.asarray(weights[9][0])
    out_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(out_embed_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Convert a pickled trax Reformer checkpoint to a PyTorch state dict.

    Fixes: def renamed to match the __main__ call site
    (``convert_trax_checkpoint_to_pytorch``); duplicate parameter names
    restored from the argparse flags below.

    Args:
        trax_model_pkl_path: pickle file holding the trax ``"weights"`` tree.
        config_file: JSON config for the target Reformer architecture.
        pytorch_dump_path: destination for ``torch.save``.
    """
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # Fix: the parser and parsed args were bound to throwaway obfuscated names
    # while the code below reads `parser` and `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 674 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase: List[Any] = logging.get_logger(__name__)
# Fix: these module constants were all assigned to the same throwaway name
# (each assignment shadowing the previous one); the tokenizer class reads them
# under the canonical names restored below (VOCAB_FILES_NAMES is also used by
# save_vocabulary()).
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

# SentencePiece word-boundary marker; name follows the upstream convention —
# not referenced in the visible code, TODO confirm against the full file.
SPIECE_UNDERLINE = "▁"
class __lowerCAmelCase ( _UpperCamelCase ):
    """CamemBERT tokenizer backed by a SentencePiece BPE model.

    NOTE(review): identifiers are machine-mangled — the four class attributes
    are all assigned to ``_A`` (each shadowing the previous) and method
    parameters reuse the duplicate name ``lowerCamelCase_`` (a SyntaxError as
    written). Code kept byte-identical; only documentation added.
    """

    _A = VOCAB_FILES_NAMES
    _A = PRETRAINED_VOCAB_FILES_MAP
    _A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _A = ["input_ids", "attention_mask"]

    def __init__( self: str, lowerCamelCase_: Optional[int], lowerCamelCase_: Any="<s>", lowerCamelCase_: Tuple="</s>", lowerCamelCase_: Optional[Any]="</s>", lowerCamelCase_: Optional[int]="<s>", lowerCamelCase_: Optional[Any]="<unk>", lowerCamelCase_: Union[str, Any]="<pad>", lowerCamelCase_: str="<mask>", lowerCamelCase_: str=["<s>NOTUSED", "</s>NOTUSED"], lowerCamelCase_: Optional[Dict[str, Any]] = None, **lowerCamelCase_: Union[str, Any], ):
        # NOTE(review): the ["<s>NOTUSED", ...] default is a mutable default
        # argument — confirm it is never mutated elsewhere in the file.
        # Mask token behave like a normal word, i.e. include the space before it
        lowercase__ : str = AddedToken(lowerCamelCase_, lstrip=lowerCamelCase_, rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_, lowerCamelCase_ ) else mask_token
        lowercase__ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=lowerCamelCase_, eos_token=lowerCamelCase_, unk_token=lowerCamelCase_, sep_token=lowerCamelCase_, cls_token=lowerCamelCase_, pad_token=lowerCamelCase_, mask_token=lowerCamelCase_, additional_special_tokens=lowerCamelCase_, sp_model_kwargs=self.sp_model_kwargs, **lowerCamelCase_, )
        # Load the SentencePiece model from the provided vocab file.
        lowercase__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(lowerCamelCase_ ) )
        lowercase__ : Tuple = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        lowercase__ : Dict = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3}
        lowercase__ : Tuple = len(self.fairseq_tokens_to_ids )
        lowercase__ : List[str] = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
        lowercase__ : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def snake_case__( self: Optional[int], lowerCamelCase_: List[int], lowerCamelCase_: Optional[List[int]] = None ):
        # Build model inputs: <s> A </s> (single) or <s> A </s></s> B </s> (pair).
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowercase__ : List[str] = [self.cls_token_id]
        lowercase__ : Optional[Any] = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def snake_case__( self: Dict, lowerCamelCase_: List[int], lowerCamelCase_: Optional[List[int]] = None, lowerCamelCase_: bool = False ):
        # Mask with 1 at special-token positions, 0 at sequence tokens.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowerCamelCase_, token_ids_a=lowerCamelCase_, already_has_special_tokens=lowerCamelCase_ )
        if token_ids_a is None:
            return [1] + ([0] * len(lowerCamelCase_ )) + [1]
        return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1]

    def snake_case__( self: Tuple, lowerCamelCase_: List[int], lowerCamelCase_: Optional[List[int]] = None ):
        # Token type ids are all zeros (RoBERTa-style: no segment embeddings).
        lowercase__ : Any = [self.sep_token_id]
        lowercase__ : Optional[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    @property
    def snake_case__( self: Tuple ):
        # Vocabulary size = fairseq specials + sentencepiece pieces.
        return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )

    def snake_case__( self: int ):
        # Full token -> id mapping, including tokens added after loading.
        lowercase__ : Dict = {self.convert_ids_to_tokens(lowerCamelCase_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def snake_case__( self: List[str], lowerCamelCase_: str ):
        # Tokenize text with the sentencepiece model.
        return self.sp_model.encode(lowerCamelCase_, out_type=lowerCamelCase_ )

    def snake_case__( self: Dict, lowerCamelCase_: int ):
        # token -> id, routing fairseq specials and unknown pieces correctly.
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(lowerCamelCase_ ) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(lowerCamelCase_ )

    def snake_case__( self: List[Any], lowerCamelCase_: Optional[int] ):
        # id -> token (inverse of the mapping above).
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def snake_case__( self: Optional[int], lowerCamelCase_: Union[str, Any] ):
        # Join sub-tokens back into a string, decoding special tokens verbatim.
        lowercase__ : Optional[Any] = []
        lowercase__ : List[str] = ''
        lowercase__ : str = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(lowerCamelCase_ ) + token
                lowercase__ : int = True
                lowercase__ : Dict = []
            else:
                current_sub_tokens.append(lowerCamelCase_ )
                lowercase__ : str = False
        out_string += self.sp_model.decode(lowerCamelCase_ )
        return out_string.strip()

    def __getstate__( self: Tuple ):
        # Drop the unpicklable SentencePiece processor when pickling.
        lowercase__ : Union[str, Any] = self.__dict__.copy()
        lowercase__ : Union[str, Any] = None
        return state

    def __setstate__( self: Optional[int], lowerCamelCase_: Dict ):
        # Restore state and reload the SentencePiece model from disk.
        lowercase__ : Dict = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs' ):
            lowercase__ : int = {}
        lowercase__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def snake_case__( self: Tuple, lowerCamelCase_: str, lowerCamelCase_: Optional[str] = None ):
        # Write (or copy) the sentencepiece model file into save_directory.
        if not os.path.isdir(lowerCamelCase_ ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowercase__ : List[Any] = os.path.join(
            lowerCamelCase_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file, lowerCamelCase_ )
        elif not os.path.isfile(self.vocab_file ):
            with open(lowerCamelCase_, 'wb' ) as fi:
                lowercase__ : Tuple = self.sp_model.serialized_model_proto()
                fi.write(lowerCamelCase_ )
        return (out_vocab_file,)
| 266 |
from __future__ import annotations
def print_distance(distance: list[float], src: int) -> None:
    """Pretty-print the shortest-distance table computed from vertex *src*.

    Fixes: def renamed to match the __main__ call site (``print_distance``);
    duplicate parameter names (a SyntaxError) restored to the names the body
    reads; the ``-> int`` annotation corrected (nothing is returned).
    """
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")
def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    """Return True if any edge can still be relaxed after |V|-1 rounds,
    i.e. the graph contains a negative-weight cycle reachable from the source.

    Fixes: def renamed to match its call site (``check_negative_cycle``);
    duplicate parameter names (a SyntaxError) restored to the names the body
    reads (``graph``, ``distance``, ``edge_count``).
    """
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ("src", "dst", "weight"))
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Single-source shortest paths; negative edge weights allowed.

    Fixes: def renamed to match the __main__ call site (``bellman_ford``);
    duplicate parameter names restored; ``distance[src] = 0.0`` restored in
    place of an assignment to a never-read obfuscated name.

    Args:
        graph: edge list; each edge is a dict with "src", "dst", "weight".
        vertex_count: number of vertices.
        edge_count: number of edges.
        src: source vertex index.

    Returns:
        distance[v] = shortest distance from *src* to v (inf if unreachable).

    Raises:
        Exception: if a negative-weight cycle is detected.
    """
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    # Relax every edge |V| - 1 times.
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ("src", "dst", "weight"))
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    if check_negative_cycle(graph, distance, edge_count):
        raise Exception("Negative cycle found")

    return distance
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Fix: the interactive inputs were bound to throwaway obfuscated names
    # while the code below reads V, E, graph, source and shortest_distance.
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 266 | 1 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path,
    question_encoder_name_or_path,
    dest_dir,
    config_name_or_path=None,
    generator_tokenizer_name_or_path=None,
    question_encoder_tokenizer_name_or_path=None,
):
    """Assemble a RAG checkpoint from a generator and a question encoder, save
    it (plus both tokenizers) under *dest_dir*, and sanity-check reloading.

    Fixes: def renamed to match the __main__ call site (``consolidate``);
    duplicate parameter names (a SyntaxError) restored from the argparse flags
    below; the undefined ``__SCREAMING_SNAKE_CASE`` references replaced with
    the proper parameters/locals.

    Args:
        model_type: "rag_token" or "rag_sequence".
        generator_name_or_path: generator model identifier.
        question_encoder_name_or_path: question encoder model identifier.
        dest_dir: ``pathlib.Path`` output directory.
        config_name_or_path: base RAG config; defaults per *model_type*.
        generator_tokenizer_name_or_path: defaults to the generator id.
        question_encoder_tokenizer_name_or_path: defaults to the encoder id.
    """
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")

    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
lowercase : int = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
lowercase : Optional[Any] = parser.parse_args()
lowercase : Optional[Any] = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 701 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int=32 , _lowerCamelCase : str=10 , _lowerCamelCase : Dict=100 , _lowerCamelCase : int=1_026 , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : str="data/tokenized_stories_train_wikitext103.jbl" , _lowerCamelCase : Any="igf_context_pairs.jbl" , ) -> str:
    """Collect IGF (context, information-gain) pairs for the secondary learner.

    NOTE(review): identifiers are machine-mangled — all parameters share the
    duplicate name ``_lowerCamelCase`` (a SyntaxError as written) and the body
    reads undefined names (``model``, ``train_data``, ``objective_set``).
    Code kept byte-identical; only documentation added.
    """
    set_seed(3)
    # generate train_data and objective_set
    __UpperCamelCase , __UpperCamelCase : Union[str, Any] = generate_datasets(
        _lowerCamelCase , _lowerCamelCase , number=_lowerCamelCase , min_len=1_026 , trim=_lowerCamelCase)
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    __UpperCamelCase : Dict = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    __UpperCamelCase : str = load_gpta("gpt2").to(_lowerCamelCase)
    print("computing perplexity on objective set")
    __UpperCamelCase : Union[str, Any] = compute_perplexity(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase).item()
    print("perplexity on objective set:" , _lowerCamelCase)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] , _lowerCamelCase : Any=15 , _lowerCamelCase : Union[str, Any]=128 , _lowerCamelCase : Any=100 , _lowerCamelCase : List[Any]="igf_model.pt" , ) -> Any:
    """Train the IGF secondary learner on previously collected context pairs.

    NOTE(review): parameters are machine-mangled duplicates of
    ``_lowerCamelCase`` (a SyntaxError as written); the ``del`` reads the
    undefined names ``model`` / ``secondary_learner_train_data``. Code kept
    byte-identical; only documentation added.
    """
    set_seed(42)

    # Load pre-trained model
    __UpperCamelCase : int = GPTaLMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    __UpperCamelCase : Any = SecondaryLearner(_lowerCamelCase)

    # Train secondary learner
    __UpperCamelCase : Union[str, Any] = train_secondary_learner(
        _lowerCamelCase , _lowerCamelCase , max_epochs=_lowerCamelCase , batch_size=_lowerCamelCase , eval_freq=100 , igf_model_path=_lowerCamelCase , )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] , _lowerCamelCase : int=32 , _lowerCamelCase : Tuple=1_000 , _lowerCamelCase : Dict=16 , _lowerCamelCase : Union[str, Any]=1.0 , _lowerCamelCase : Optional[Any]=recopy_gpta , _lowerCamelCase : List[Any]=None , _lowerCamelCase : Optional[Any]=10 , _lowerCamelCase : Union[str, Any]="gpt2_finetuned.pt" , ) -> Union[str, Any]:
    """Fine-tune GPT-2 with an optional IGF secondary learner filtering which
    contexts are backpropagated.

    NOTE(review): parameters are machine-mangled duplicates of
    ``_lowerCamelCase`` (a SyntaxError as written) and the body reads many
    undefined names (``max_steps``, ``context_len``, ``batch_size``,
    ``eval_interval``, ``model``, ...). Code kept byte-identical; only
    documentation added — a rename pass is needed before this can run.
    """
    __UpperCamelCase : Optional[Any] = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    __UpperCamelCase : List[Any] = RandomSampler(_lowerCamelCase)
    __UpperCamelCase : Any = DataLoader(_lowerCamelCase , sampler=_lowerCamelCase)
    __UpperCamelCase : Tuple = max_steps // (len(_lowerCamelCase)) + 1
    __UpperCamelCase : List[Any] = 0
    __UpperCamelCase : List[Any] = torch.zeros((1, context_len) , dtype=torch.long , device=_lowerCamelCase)
    __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : List[Any] = recopy_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(_lowerCamelCase)
        secondary_learner.eval()
    __UpperCamelCase : Union[str, Any] = []
    __UpperCamelCase : Any = 0
    __UpperCamelCase : List[Any] = []
    __UpperCamelCase : Any = []

    # Compute the performance of the transformer model at the beginning
    __UpperCamelCase : str = compute_perplexity(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
    test_perps.append(_lowerCamelCase)
    print("Test perplexity, step" , _lowerCamelCase , ":" , _lowerCamelCase)
    for epoch in range(int(_lowerCamelCase)):
        for step, example in enumerate(_lowerCamelCase):
            torch.cuda.empty_cache()
            # Sample a random window of context_len tokens from the example.
            __UpperCamelCase : Optional[Any] = random.randint(0 , example.size(2) - context_len - 1)
            __UpperCamelCase : Optional[Any] = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            __UpperCamelCase : List[Any] = model(_lowerCamelCase , labels=_lowerCamelCase)
            __UpperCamelCase : int = True
            if secondary_learner is not None:
                # Predicted information gain decides whether to backprop this context.
                __UpperCamelCase : Optional[int] = secondary_learner.forward(
                    torch.tensor(_lowerCamelCase , dtype=torch.long , device=_lowerCamelCase).unsqueeze(0))[0].item()
                observed_qs.append(float(_lowerCamelCase))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.

                if global_step == 10:
                    __UpperCamelCase : List[str] = -1
                if predicted_q < threshold:
                    __UpperCamelCase : Optional[int] = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                __UpperCamelCase : Optional[Any] = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                __UpperCamelCase : str = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1

                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    __UpperCamelCase : List[Any] = compute_perplexity(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
                    test_perps.append(_lowerCamelCase)

                    print("Test perplexity, step" , _lowerCamelCase , ":" , _lowerCamelCase)

            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict() , _lowerCamelCase)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def _SCREAMING_SNAKE_CASE() -> Optional[Any]:
    """CLI entry point for IGF fine-tuning.

    Declares the command-line arguments, then runs the whole pipeline:
    collect (context, information-gain) pairs, train the secondary learner,
    and fine-tune GPT-2 with information-gain filtration.

    NOTE(review): as in the upstream IGF example, the pipeline below runs on
    hard-coded values; the declared arguments are never parsed or read.
    """
    # Fix: the parser was assigned to a garbled placeholder name while every
    # add_argument call referenced `parser`; the placeholder defaults/types
    # (`_lowerCamelCase`) are restored to their obvious values.
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file",
        type=str,
        default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file",
        type=str,
        default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len",
        default=32,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set",
        default=100,
        type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1_000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size",
        default=128,
        type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval",
        default=10,
        type=int,
        help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1_026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold",
        default=1.0,
        type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model",
        # NOTE(review): upstream passes the `recopy_gpt2` helper here; its
        # (renamed) definition must exist earlier in this module — confirm.
        default=recopy_gpt2,
        type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1_026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    igf_model_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        igf_model_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPTaLMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1_026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1_000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,  # NOTE(review): helper from the IGF module — confirm its name here
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )
if __name__ == "__main__":
    # Fix: the entry-point function above is named _SCREAMING_SNAKE_CASE;
    # `main` does not exist in this module, so `main()` raised NameError.
    _SCREAMING_SNAKE_CASE()
"""simple docstring"""
def _UpperCamelCase(a: int, b: int) -> str:
    """Return the bitwise XOR of two non-negative integers as a binary string.

    The result is prefixed with "0b", mirroring the format of ``bin()``.

    Args:
        a: first non-negative operand.
        b: second non-negative operand.

    Returns:
        Binary-string representation of ``a ^ b``, e.g. ``"0b110"``.

    Raises:
        ValueError: if either operand is negative.
    """
    # Fix: the original signature declared the same placeholder name for both
    # parameters (a SyntaxError) while the body referenced `a` and `b`.
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    # Pad both operands to a common width, then XOR digit by digit.
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
    # Execute the module's doctests when run directly.
    from doctest import testmod

    testmod()
"""simple docstring"""
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Naive product of two 2x2 matrices (the Strassen recursion base case).

    Raises:
        Exception: if either input is not exactly 2x2.
    """
    # Fix: the def was garbled to `lowerCamelCase_` with two identically
    # named parameters (SyntaxError); the caller in actual_strassen and the
    # body expect default_matrix_multiplication(a, b).
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    """Element-wise sum of two equally shaped matrices (lists of lists)."""
    # Fix: garbled name/duplicate parameters restored from the call sites in
    # actual_strassen; the `-> Dict` annotation was wrong — a list is built.
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    """Element-wise difference of two equally shaped matrices."""
    # Fix: garbled name/duplicate parameters restored from the call sites in
    # actual_strassen; the `-> List[Any]` annotation normalized to `list`.
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split an even-sided square matrix into its four quadrants.

    Returns:
        (top_left, top_right, bot_left, bot_right).

    Raises:
        Exception: if either dimension is odd.
    """
    # Fix: garbled name restored from the call sites in actual_strassen.
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    """Return (row count, column count) of *matrix*."""
    # Fix: garbled name restored from the call sites in actual_strassen/strassen.
    return len(matrix), len(matrix[0])
def print_matrix(matrix: list) -> None:
    """Pretty-print *matrix*, one row per line."""
    # Fix: garbled def name (colliding `lowerCamelCase_`) and a garbled
    # argument — the generator iterated `matrix` but printed a placeholder;
    # each row (`line`) is what should be stringified.
    print("\n".join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursively multiply two equal, power-of-two-sized square matrices
    with Strassen's algorithm (7 recursive products instead of 8).

    Both inputs must already be padded by strassen(); callers should not use
    this directly on arbitrary shapes.
    """
    # Fix: garbled def name/duplicate parameters restored from the recursive
    # call sites; the quadrant/product wiring follows the visible structure.
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    # Strassen's seven products.
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(top_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrixa: list, matrixb: list) -> list:
    """Multiply two matrices of compatible (arbitrary) dimensions.

    Both matrices are zero-padded up to the next power-of-two square size,
    multiplied with actual_strassen, and the padding is stripped again.

    Raises:
        Exception: if the inner dimensions do not match.
    """
    # Fix: garbled def name/duplicate parameters restored from the call in
    # __main__; `math.loga` was a garbling of `math.log2`.
    if matrix_dimensions(matrixa)[1] != matrix_dimensions(matrixb)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrixa}\n"
            f"Matrix B: {matrixb}"
        )
        raise Exception(msg)
    dimensiona = matrix_dimensions(matrixa)
    dimensionb = matrix_dimensions(matrixb)

    if dimensiona[0] == dimensiona[1] and dimensionb[0] == dimensionb[1]:
        # NOTE(review): preserved from the original — for two square inputs
        # this returns the operands instead of their product; confirm intent.
        return [matrixa, matrixb]

    maximum = max(*dimensiona, *dimensionb)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrixa = matrixa
    new_matrixb = matrixb

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimensiona[0]:
            for _ in range(dimensiona[1], maxim):
                new_matrixa[i].append(0)
        else:
            new_matrixa.append([0] * maxim)
        if i < dimensionb[0]:
            for _ in range(dimensionb[1], maxim):
                new_matrixb[i].append(0)
        else:
            new_matrixb.append([0] * maxim)

    final_matrix = actual_strassen(new_matrixa, new_matrixb)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimensiona[0]:
            for _ in range(dimensionb[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    # Fix: both example matrices were assigned to the same garbled name `a`
    # (the second shadowed the first) and the call passed one undefined name
    # twice; restore two distinct matrices and pass both.
    matrixa = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrixb = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrixa, matrixb))
"""simple docstring"""
def __lowerCamelCase(string: str, separator: str = " ") -> list:
    """Split *string* on every occurrence of *separator* (a single char).

    Unlike ``str.split``, an empty input yields ``[]`` and a trailing
    separator does not produce a trailing empty field.

    Args:
        string: text to split.
        separator: single character to split on (default: a space).

    Returns:
        List of the substrings between separators.
    """
    # Fix: the original signature declared the same placeholder name for both
    # parameters (a SyntaxError); the body references `string` and `separator`.
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            # Final character: flush the last (separator-free) field.
            split_words.append(string[last_index : index + 1])
    return split_words
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest

    doctest.testmod()
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCAmelCase(unittest.TestCase):
    """Slow integration test for the Flax mT5 conditional-generation model."""

    @slow
    def __A(self):
        # Fix: every local had been collapsed to a single placeholder name
        # (each assignment shadowed the last) and the call arguments
        # referenced an undefined `a__`; distinct locals restored following
        # the obvious data flow.
        model = FlaxMTaForConditionalGeneration.from_pretrained('google/mt5-small')
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')

        input_ids = tokenizer('Hello there', return_tensors='np').input_ids
        labels = tokenizer('Hi I am', return_tensors='np').input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        # Reference score is the (negative) total token log-likelihood.
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9_127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1E-4)
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Return the (k, o, q, v) attention kernels of block *i* under *prefix*.

    Args:
        params: flattened T5X parameter dict with "/"-joined keys.
        i: layer (block) index.
        prefix: "encoder" or "decoder".
        layer_name: attention sub-module name (e.g. "attention",
            "self_attention", "encoder_decoder_attention").
    """
    # Fix: the garbled def declared four identically named parameters
    # (SyntaxError); the signature is dictated by the body's key pattern and
    # the call sites in convert_tax_to_pytorch.
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Return the (wi, wo) MLP kernels of block *i* under *prefix*.

    With *split_mlp_wi* (v1.1 gated-GeLU checkpoints), ``wi`` is the pair
    ``(wi_0, wi_1)``; otherwise it is the single ``wi`` kernel.
    """
    # Fix: the garbled def declared four identically named parameters
    # (SyntaxError); names restored from the body and call sites.
    if split_mlp_wi:
        wi_a = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_b = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_a, wi_b)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]

    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Return the layer-norm scale of *layer_name* in block *i* under *prefix*."""
    # Fix: garbled def with four identically named parameters (SyntaxError);
    # signature restored from the body's key pattern and the call sites.
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def lowerCAmelCase (__A , *, __A , __A):
    """Convert a flattened T5X parameter tree into a Transformers-style
    weight mapping (encoder, plus decoder unless encoder-only).

    NOTE(review): this block is corrupted by a mechanical rename and cannot
    run as written:
      * the signature declares three parameters that all share the name
        ``__A`` — a SyntaxError; the caller passes
        ``(variables, num_layers=..., is_encoder_only=...)``;
      * every assignment target was collapsed to ``_a``, which erased the
        destination state-dict keys (``encoder.block.{i}.layer...`` etc.)
        the right-hand sides were stored under, and the accumulator ``new``
        is never actually populated.
    The right-hand sides still show the intended data flow; restore the
    targets from the upstream convert_t5x_checkpoint_to_pytorch script.
    """
    # Flatten variables["target"] and join the nested key tuples with "/".
    _a = traverse_util.flatten_dict(variables['''target'''])
    _a = {'''/'''.join(__A): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    _a = '''encoder/layers_0/mlp/wi_0/kernel''' in old
    print('''Split MLP:''' , __A)

    _a = collections.OrderedDict()

    # Shared embeddings.
    _a = old['''token_embedder/embedding''']

    # Encoder.
    for i in range(__A):
        # Block i, layer 0 (Self Attention).
        _a = tax_layer_norm_lookup(__A , __A , '''encoder''' , '''pre_attention_layer_norm''')
        _a , _a , _a , _a = tax_attention_lookup(__A , __A , '''encoder''' , '''attention''')
        _a = layer_norm
        _a = k.T
        _a = o.T
        _a = q.T
        _a = v.T

        # Block i, layer 1 (MLP).
        _a = tax_layer_norm_lookup(__A , __A , '''encoder''' , '''pre_mlp_layer_norm''')
        _a , _a = tax_mlp_lookup(__A , __A , '''encoder''' , __A)
        _a = layer_norm
        if split_mlp_wi:
            _a = wi[0].T
            _a = wi[1].T
        else:
            _a = wi.T
        _a = wo.T

    _a = old[
        '''encoder/relpos_bias/rel_embedding'''
    ].T
    _a = old['''encoder/encoder_norm/scale''']

    if not is_encoder_only:
        # Decoder.
        for i in range(__A):
            # Block i, layer 0 (Self Attention).
            _a = tax_layer_norm_lookup(__A , __A , '''decoder''' , '''pre_self_attention_layer_norm''')
            _a , _a , _a , _a = tax_attention_lookup(__A , __A , '''decoder''' , '''self_attention''')
            _a = layer_norm
            _a = k.T
            _a = o.T
            _a = q.T
            _a = v.T

            # Block i, layer 1 (Cross Attention).
            _a = tax_layer_norm_lookup(__A , __A , '''decoder''' , '''pre_cross_attention_layer_norm''')
            _a , _a , _a , _a = tax_attention_lookup(__A , __A , '''decoder''' , '''encoder_decoder_attention''')
            _a = layer_norm
            _a = k.T
            _a = o.T
            _a = q.T
            _a = v.T

            # Block i, layer 2 (MLP).
            _a = tax_layer_norm_lookup(__A , __A , '''decoder''' , '''pre_mlp_layer_norm''')
            _a , _a = tax_mlp_lookup(__A , __A , '''decoder''' , __A)
            _a = layer_norm
            if split_mlp_wi:
                _a = wi[0].T
                _a = wi[1].T
            else:
                _a = wi.T
            _a = wo.T

        _a = old['''decoder/decoder_norm/scale''']
        _a = old[
            '''decoder/relpos_bias/rel_embedding'''
        ].T

    # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
    if "decoder/logits_dense/kernel" in old:
        _a = old['''decoder/logits_dense/kernel'''].T

    return new
def make_state_dict(converted_params, is_encoder_only):
    """Build a PyTorch state dict from converted numpy params and tie the
    missing embedding / LM-head weights to the shared embedding.

    Args:
        converted_params: mapping of state-dict key -> numpy array.
        is_encoder_only: skip the decoder/LM-head tying when True.

    Returns:
        collections.OrderedDict of key -> torch.Tensor.
    """
    # Fix: the garbled def declared two identically named parameters
    # (SyntaxError) and the tied-weight assignment targets were collapsed;
    # the targets are restored from the membership checks on each branch.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only):
    """Load a T5X checkpoint, convert it, and copy the weights into *model*."""
    # Fix: garbled def with four identically named parameters (SyntaxError);
    # the signature mirrors the call in convert_tax_checkpoint_to_pytorch,
    # and strict loading is enabled so missing/unexpected keys fail loudly.
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False):
    """Build a T5 model from *config_file*, load the T5X checkpoint into it,
    and save (then reload, as a sanity check) a PyTorch checkpoint.
    """
    # Fix: garbled def with four identically named parameters (SyntaxError);
    # names restored from the CLI arguments parsed in __main__.
    config = TaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = TaEncoderModel(config)
    else:
        model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    # Fix: the parser and the parsed namespace were both assigned to the
    # garbled name `lowercase_` while later lines referenced `parser`/`args`;
    # additionally the parse_args() call itself was lost.
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        # argparse stores "--t5x_checkpoint_path" as `t5x_checkpoint_path`;
        # the original read the non-existent `tax_checkpoint_path` attribute.
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
"""simple docstring"""
from __future__ import annotations
from math import ceil, floor, sqrt
def lowerCAmelCase_(target: int = 2_00_00_00) -> int:
    """Project Euler 85: find the grid area whose rectangle count is closest
    to *target*.

    A w x h grid contains T(w) * T(h) sub-rectangles, where T(n) = n(n+1)/2
    is the n-th triangle number; one side is scanned and the other solved
    via the quadratic formula.

    Args:
        target: desired rectangle count (default two million).

    Returns:
        The area w * h of the best grid.
    """
    # Fix: every assignment target had been collapsed to one placeholder
    # while the expressions referenced the real identifiers
    # (triangle_numbers, best_product, ...); also the parameter must be
    # `target`, which the body reads.
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
    # Fix: the solver above was renamed to `lowerCAmelCase_`, so the
    # original f-string expression `solution()` raised NameError.
    print(f"{lowerCAmelCase_() = }")
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
# Fix: the module logger was bound to the garbled name `_A` while every
# function below logs through `logger`; bind the name the code actually uses.
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    """Save *model* into *dirpath*, replacing any previous checkpoint files.

    If the directory already exists, stale ``config.json`` /
    ``pytorch_model.bin`` files are removed first; otherwise it is created.
    """
    # Fix: the garbled def declared two identically named parameters
    # (SyntaxError) and the body referenced a foreign placeholder; the name
    # and parameter order are restored from the call `save_model(model,
    # args.output_dir)` in prune_heads.
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a distribution along the last dimension.

    Args:
        p: tensor of probabilities (or raw scores when *unlogit* is True).
        unlogit: square ``p`` first, to make raw scores non-negative.

    Returns:
        ``-sum(p * log p)`` over the last dim, with ``0 * log 0 := 0``.
    """
    # Fix: the garbled def repeated one placeholder parameter name
    # (SyntaxError); the collapsed `... = 0` line was the in-place mask
    # `plogp[p == 0] = 0` that suppresses NaNs from log(0).
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_ad_tensor(tensor):
    """Log a 2D tensor through the module logger, one layer per row.

    Integer (long) tensors are logged with ``%d``-style formatting, float
    tensors with five decimals.
    """
    # Fix: garbled def name/parameter restored from the call sites in
    # compute_heads_importance (`print_ad_tensor(attn_entropy)` etc.).
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute per-head attention entropy and importance scores.

    Importance is the accumulated absolute gradient of the LM loss with
    respect to a per-head mask; entropy is accumulated from the attention
    weights. Both are normalized by the number of tokens seen.

    Returns:
        (attn_entropy, head_importance, total_loss)
    """
    # Fix(review): the garbled def repeated one placeholder parameter name
    # (SyntaxError) and collapsed all assignment targets; names restored
    # from the visible data flow and from the call sites in mask_heads /
    # prune_heads.
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_ad_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Iteratively mask the least-important heads until the LM score drops
    below ``masking_threshold`` times the original score.

    Saves the final mask to ``<output_dir>/head_mask.npy`` and returns it.
    """
    # Fix(review): garbled duplicate placeholder parameters (SyntaxError)
    # and collapsed assignment targets restored from the data flow; the
    # function name follows the upstream bertology pruning script — confirm.
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())
    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually remove the heads zeroed in *head_mask*, compare score and
    timing against plain masking, and save the pruned model.
    """
    # Fix(review): garbled duplicate placeholder parameters (SyntaxError)
    # and collapsed assignment targets restored from the data flow; the
    # function name follows the upstream bertology pruning script — confirm.
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            # A single pruned head comes back as a bare int; normalize to a list.
            heads_to_prune[k] = [v]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def __magic_name__ ( ) -> Dict:
lowercase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir" , default=_lowerCamelCase , type=_lowerCamelCase , required=_lowerCamelCase , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
parser.add_argument(
"--model_name_or_path" , default=_lowerCamelCase , type=_lowerCamelCase , required=_lowerCamelCase , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--output_dir" , default=_lowerCamelCase , type=_lowerCamelCase , required=_lowerCamelCase , help="The output directory where the model predictions and checkpoints will be written." , )
# Other parameters
parser.add_argument(
"--config_name" , default="" , type=_lowerCamelCase , help="Pretrained config name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--tokenizer_name" , default="" , type=_lowerCamelCase , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--cache_dir" , default=_lowerCamelCase , type=_lowerCamelCase , help="Where do you want to store the pre-trained models downloaded from s3" , )
parser.add_argument(
"--data_subset" , type=_lowerCamelCase , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
parser.add_argument(
"--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
parser.add_argument(
"--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" )
parser.add_argument(
"--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
parser.add_argument(
"--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
parser.add_argument(
"--masking_threshold" , default=0.9 , type=_lowerCamelCase , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
parser.add_argument(
"--masking_amount" , default=0.1 , type=_lowerCamelCase , help="Amount to heads to masking at each masking step." )
parser.add_argument("--metric_name" , default="acc" , type=_lowerCamelCase , help="Metric to use for head masking." )
parser.add_argument(
"--max_seq_length" , default=128 , type=_lowerCamelCase , help=(
"The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, sequences shorter padded."
) , )
parser.add_argument("--batch_size" , default=1 , type=_lowerCamelCase , help="Batch size." )
parser.add_argument("--seed" , type=_lowerCamelCase , default=42 )
parser.add_argument("--local_rank" , type=_lowerCamelCase , default=-1 , help="local_rank for distributed training on gpus" )
parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
parser.add_argument("--server_ip" , type=_lowerCamelCase , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=_lowerCamelCase , default="" , help="Can be used for distant debugging." )
lowercase : Dict = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_lowerCamelCase )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
lowercase : List[str] = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
lowercase : List[Any] = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
lowercase : Optional[Any] = torch.device("cuda" , args.local_rank )
lowercase : Optional[int] = 1
torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
lowercase : List[str] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
lowercase : List[Any] = nn.parallel.DistributedDataParallel(
_lowerCamelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=_lowerCamelCase )
elif args.n_gpu > 1:
lowercase : Union[str, Any] = nn.DataParallel(_lowerCamelCase )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=_lowerCamelCase )
torch.save(_lowerCamelCase , os.path.join(args.output_dir , "run_args.bin" ) )
logger.info("Training/evaluation parameters %s" , _lowerCamelCase )
# Prepare dataset
lowercase : Union[str, Any] = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
lowercase : str = (torch.from_numpy(_lowerCamelCase ),)
lowercase : int = TensorDataset(*_lowerCamelCase )
lowercase : Union[str, Any] = RandomSampler(_lowerCamelCase )
lowercase : List[str] = DataLoader(_lowerCamelCase , sampler=_lowerCamelCase , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
lowercase : int = mask_heads(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
prune_heads(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Script entry point: run the head-importance / head-masking analysis when
# executed directly (``main`` is defined above, outside this excerpt).
if __name__ == "__main__":
    main()
| 706 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_A : int = logging.get_logger(__name__)
class a__ ( a_ ):
    """Deprecated alias of ``GLPNImageProcessor`` kept for backward compatibility."""

    def __init__(self, *args, **kwargs):
        """Emit a deprecation warning, then forward all arguments to the base class.

        Fixes the original ``def __init__(self, *_a, **_a)``, which reused one
        name for both catch-alls (a SyntaxError) and passed the argument tuple
        as the ``warnings.warn`` category instead of ``FutureWarning``.
        """
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 518 | 0 |
"""simple docstring"""
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__ ) -> int:
    """Return the sum of the decimal digits of the argument (sign ignored).

    Iterative version. Fixes the original body, which assigned to ``a_`` but
    then read undefined names ``n`` and ``res``.
    """
    n = abs(SCREAMING_SNAKE_CASE__)
    res = 0
    while n > 0:
        res += n % 10  # add the least-significant digit
        n //= 10  # then drop it
    return res
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__ ) -> int:
    """Return the digit sum of the argument, computed recursively (sign ignored).

    Fixes the original body, which read an undefined ``n`` and recursed via a
    name (``sum_of_digits``) that is not defined in this file.
    """
    n = abs(SCREAMING_SNAKE_CASE__)
    # Base case: a single digit is its own digit sum.
    return n if n < 10 else n % 10 + lowerCAmelCase_(n // 10)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__ ) -> int:
    """Return the digit sum via string conversion.

    Fixes the original, which summed ``int(SCREAMING_SNAKE_CASE__)`` (the whole
    number) once per character instead of converting each character ``c``.
    """
    return sum(int(c) for c in str(abs(SCREAMING_SNAKE_CASE__)))
def lowerCAmelCase_ ( ) -> None:
    """Benchmark the three digit-sum implementations on increasingly large inputs.

    Fixes the original inner helper, which assigned both the call string and
    the timing to ``a_`` and then printed undefined ``call`` / ``timing``.
    """
    from timeit import timeit

    def benchmark_a_function(func, value) -> None:
        # Build an expression such as "sum_of_digits(262144)" and time it
        # through __main__ so timeit can resolve the function by name.
        call = f"""{func.__name__}({value})"""
        timing = timeit(f"""__main__.{call}""", setup="import __main__")
        print(f"""{call:56} = {func(value)} -- {timing:.4f} seconds""")

    # NOTE(review): sum_of_digits / sum_of_digits_recursion / sum_of_digits_compact
    # are not defined under those names in this file (the implementations above
    # are all called ``lowerCAmelCase_``) — confirm the intended public names.
    for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
# Script entry point: run the doctests, then the benchmark.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): ``benchmark`` is not defined in this file (the benchmark
    # function above is named ``lowerCAmelCase_``) — confirm the intended name.
    # Trailing dataset-artifact tokens ("| 237 |") removed from this line.
    benchmark()
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCAmelCase_ ( tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa ) -> None:
    """Convert a TensorFlow BigBird checkpoint into a PyTorch model and save it.

    The original signature reused ``SCREAMING_SNAKE_CASE__`` for all four
    parameters (a SyntaxError); names are restored from the CLI flags defined
    below.

    :param tf_checkpoint_path: path to the TensorFlow checkpoint.
    :param big_bird_config_file: JSON file describing the model architecture.
    :param pytorch_dump_path: output directory for the converted model.
    :param is_trivia_qa: build a question-answering head instead of pretraining.
    """
    # Initialise PyTorch model from the JSON architecture description.
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    # The original assigned the parser and parsed args to SCREAMING_SNAKE_CASE_
    # but then read undefined ``parser`` / ``args``; both are fixed here, and
    # the dataset-artifact tokens fused onto the closing line are removed.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--big_bird_config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained BERT model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
    )
    args = parser.parse_args()
    # NOTE(review): ``convert_tf_checkpoint_to_pytorch`` is not defined in this
    # file (the conversion function above is named ``lowerCAmelCase_``) —
    # confirm the intended name.
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
UpperCamelCase = 'src/diffusers'
# Matches is_xxx_available()
UpperCamelCase = re.compile(R'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
UpperCamelCase = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
UpperCamelCase = '\n{0} = None\n'
UpperCamelCase = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n'
UpperCamelCase = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
def _A ( lowerCAmelCase_ : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = _re_backend.findall(lowerCAmelCase_ )
if len(lowerCAmelCase_ ) == 0:
return None
return "_and_".join(lowerCAmelCase_ )
def _A ( ):
    """Parse the diffusers ``__init__`` and map each backend to its guarded objects.

    Returns a dict ``{backend_name: [object_name, ...]}`` built from the
    ``is_xxx_available()`` / ``else:`` blocks in the package ``__init__``.
    The original body assigned every local to ``lowerCAmelCase__`` and then
    read different names (``lines``, ``objects`` ...); locals are restored.
    """
    # NOTE(review): hard-coded repo path; the module-level constant
    # ('src/diffusers') is shadowed by later reassignments of ``UpperCamelCase``.
    with open(os.path.join("src/diffusers", "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # NOTE(review): ``_re_single_line_import`` is likewise shadowed — compiled
    # locally from the pattern declared at the top of the file.
    single_line_import_re = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        # NOTE(review): ``find_backend`` is not defined under that name in this
        # file (the helper above is also called ``_A``) — confirm intended name.
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = single_line_import_re.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1
            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def _A ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any ):
"""simple docstring"""
if name.isupper():
return DUMMY_CONSTANT.format(lowerCAmelCase_ )
elif name.islower():
return DUMMY_FUNCTION.format(lowerCAmelCase_ , lowerCAmelCase_ )
else:
return DUMMY_CLASS.format(lowerCAmelCase_ , lowerCAmelCase_ )
def _A ( lowerCAmelCase_ : dict=None ):
    """Build ``{backend: dummy-module-source}`` for every backend-guarded object.

    *lowerCAmelCase_* is the ``backend_specific_objects`` mapping; when None it
    is recomputed from the package ``__init__``. The original body assigned
    every local to ``lowerCAmelCase__`` and then read ``dummy_file`` unbound,
    and passed the wrong arguments to the object renderer; both are fixed.
    """
    backend_specific_objects = lowerCAmelCase_
    if backend_specific_objects is None:
        # NOTE(review): ``read_init`` is not defined under that name in this
        # file (the parser above is also called ``_A``) — confirm intended name.
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        # e.g. "torch_and_flax" -> '["torch", "flax"]'
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        # NOTE(review): ``create_dummy_object`` is also named ``_A`` here.
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file
    return dummy_files
def _A ( lowerCAmelCase_ : bool=False ):
    """Compare generated dummy files with those on disk; rewrite or raise on mismatch.

    :param lowerCAmelCase_: ``overwrite`` flag — when True, stale dummy files
        are rewritten in place instead of raising ``ValueError``.
    :raises ValueError: a dummy file is stale and overwriting is disabled.

    The original body used the boolean flag itself as the backend key in
    ``short_names.get`` and as a filesystem path component; both are fixed.
    """
    overwrite = lowerCAmelCase_
    # NOTE(review): ``create_dummy_files`` is not defined under that name in
    # this file (every helper here is called ``_A``) — confirm intended name.
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}
    # Locate actual dummy modules and read their content.
    # NOTE(review): hard-coded repo path; the module-level constant
    # ('src/diffusers') is shadowed by later reassignments of ``UpperCamelCase``.
    path = os.path.join("src/diffusers", "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f'dummy_{short_names.get(backend, backend)}_objects.py')
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f'Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main '
                    "__init__ has new objects." )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f'diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` '
                    "to fix this." )
if __name__ == "__main__":
    # The original assigned the parser and parsed args to ``UpperCamelCase``
    # but then read undefined ``parser`` / ``args``; both are fixed here.
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
    # NOTE(review): ``check_dummies`` is not defined under that name in this
    # file (the checker above is called ``_A``) — confirm the intended name.
    check_dummies(args.fix_and_overwrite)
| 125 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCamelCase ( UpperCamelCase__ ):
    """Unit tests for ``PNDMScheduler`` (save/reload round-trips, shapes, full loops).

    NOTE(review): in the original every method was named ``a`` (so only the
    last one would survive on the class) and several signatures reused
    ``SCREAMING_SNAKE_CASE__`` for two parameters — a SyntaxError. Method and
    attribute names are restored from the upstream diffusers PNDM scheduler
    test so ``self.get_scheduler_config`` / ``self.full_loop`` etc. resolve.
    The base class (``UpperCamelCase__``) is expected to be
    ``SchedulerCommonTest`` — confirm against the import at the top.
    """

    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        """Default PNDM config; keyword overrides are merged in."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Save/reload the scheduler and check step_prk/step_plms outputs match."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # Round-tripping is covered by check_over_configs / check_over_forward.
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Save/reload and compare outputs for a given set of forward kwargs."""
        kwargs = dict(self.forward_default_kwargs)
        kwargs.update(forward_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        """Run a complete 10-step PRK + PLMS denoising loop and return the sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        """step_prk/step_plms must preserve the sample shape across timesteps."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        # step_plms without any stored PRK residuals must raise.
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        # NOTE(review): the original passed undefined SCREAMING_SNAKE_CASE__;
        # ``True`` restored from the upstream test matching these expectations.
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        # NOTE(review): ``False`` restored as above.
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
| 125 | 1 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
) | 30 |
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class __magic_name__ ( lowerCAmelCase_ ):
    """Torch ``Dataset`` over CNN/DailyMail story files found in a directory."""

    def __init__(self, path="", prefix="train"):
        """Index every story file under *path*, skipping summary files.

        The original signature reused ``__snake_case`` for both parameters
        (a SyntaxError); names are restored. ``prefix`` is accepted for
        interface compatibility but unused here.
        """
        assert os.path.isdir(path)
        self.documents = []
        for story_filename in os.listdir(path):
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        """Number of indexed story files."""
        return len(self.documents)

    def __getitem__(self, idx):
        """Return ``(document_name, story_lines, summary_lines)`` for story *idx*."""
        document_path = self.documents[idx]
        document_name = document_path.split('/')[-1]
        with open(document_path, encoding='utf-8') as source:
            raw_story = source.read()
        # The original passed ``idx`` instead of the file contents here.
        # NOTE(review): ``process_story`` is not defined under that name in this
        # file (the splitter below is named ``UpperCamelCase_``) — confirm.
        story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines
def UpperCamelCase_( _snake_case : str ):
    """Split a raw CNN/DailyMail story into ``(story_lines, summary_lines)``.

    Lines before the first ``@highlight`` marker are article lines; remaining
    non-marker lines are summary lines. Every non-empty line is given a
    terminal period when it lacks one. Fixes the original, which applied the
    period fixer to the whole story string instead of each line, via a helper
    name (``_add_missing_period``) not defined in this file — the helper is
    inlined locally.
    """
    END_TOKENS = ['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u2019', ')']

    def _add_missing_period(line):
        # @highlight markers pass through; already-terminated lines too.
        if line.startswith('@highlight') or line[-1] in END_TOKENS:
            return line
        return line + "."

    nonempty_lines = [line.strip() for line in _snake_case.split('\n') if line.strip()]
    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith('@highlight'):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop all elements
            # until there is none left: the whole text was story, no summary.
            return story_lines, []

    # gather summary lines (drop any further @highlight markers)
    summary_lines = [t for t in lines if not t.startswith('@highlight')]
    return story_lines, summary_lines
def UpperCamelCase_( _snake_case : str ):
    """Return the input line with a terminal period appended when missing.

    ``@highlight`` marker lines and lines already ending in a sentence
    terminator are returned unchanged. Fixes the original body, which read an
    undefined name ``line`` — the parameter is ``_snake_case``.
    """
    END_TOKENS = ['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u2019', ')']
    line = _snake_case
    if line.startswith('@highlight'):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def UpperCamelCase_( sequence : list , block_size : int , pad_token_id : int ):
    """Truncate *sequence* to *block_size*, or right-pad it with *pad_token_id*.

    The original signature reused ``_snake_case`` for all three parameters
    (a SyntaxError); names are restored from usage. Note that padding mutates
    *sequence* in place via ``extend``.
    """
    if len(sequence) > block_size:
        return sequence[:block_size]
    sequence.extend([pad_token_id] * (block_size - len(sequence)))
    return sequence
def UpperCamelCase_( sequence , pad_token_id ):
    """Return an attention mask: 1 for real tokens, 0 where *sequence* equals *pad_token_id*.

    The original signature reused ``_snake_case`` for both parameters
    (a SyntaxError) and its body read undefined names; restored from usage.
    """
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
def UpperCamelCase_( tokenizer , story_lines , summary_lines ):
    """Tokenize story and summary lines and flatten each into one id list.

    The original signature reused ``_snake_case`` for all three parameters
    (a SyntaxError); names are restored from the body's intent.
    """
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids
def UpperCamelCase_( batch , separator_token_id ):
    """Alternating 0/1 token-type ids per sequence, switching at each separator.

    The segment counter starts at -1 (so tokens before the first separator get
    id ``-1 % 2 == 1``) and increments on every *separator_token_id* seen.
    The original signature reused ``_snake_case`` for both parameters
    (a SyntaxError) and its accumulators were unbound; restored from intent.
    """
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
| 242 | 0 |
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class __magic_name__ ( _UpperCamelCase ):
    """Read a SQL query/table into a ``datasets.Dataset`` via the ``Sql`` builder.

    NOTE(review): the original ``__init__`` reused ``__magic_name__`` for every
    parameter (a SyntaxError) and never assigned ``self.builder``; names are
    restored from the upstream ``datasets.io.sql.SqlDatasetReader``.
    """

    def __init__(self, sql, con, features=None, cache_dir=None, keep_in_memory=False, **kwargs):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        # Keep the builder so read() can prepare and materialize the dataset.
        self.builder = Sql(
            cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs,
        )

    def read(self):
        """Prepare the SQL builder and return the resulting 'train' split."""
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split='train', verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class __magic_name__ :
    """Write a ``datasets.Dataset`` to a SQL table, optionally with multiprocessing.

    NOTE(review): in the original, all three public/private methods were named
    ``_lowerCamelCase`` (only the last survived on the class), the tuple unpack
    in the batch writer bound every element to one name, and the class object
    itself was used as a slice bound. Method names and locals are restored from
    the upstream ``datasets.io.sql.SqlDatasetWriter``. This class also shadows
    the reader class above (both are ``__magic_name__``) — confirm intended names.
    """

    def __init__(self, dataset, name, con, batch_size=None, num_proc=None, **to_sql_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''')
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self):
        """Strip reader-only kwargs and delegate to ``_write``; returns rows written."""
        _ = self.to_sql_kwargs.pop('sql', None)
        _ = self.to_sql_kwargs.pop('con', None)
        index = self.to_sql_kwargs.pop('index', False)
        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        """Write one ``(offset, index, to_sql_kwargs)`` batch; returns its row count."""
        offset, index, to_sql_kwargs = args
        # After the first batch, append instead of replacing the table.
        to_sql_kwargs = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs):
        """Write the whole dataset, sequentially or via a process pool; returns rows written."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating SQL from Arrow format',
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for batch_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql, [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating SQL from Arrow format',
                ):
                    written += batch_rows
        return written
| 309 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    """Smoke tests for `PyTorchBenchmark`: run a tiny model benchmark and check
    that timing/memory results were actually recorded.

    NOTE(review): the obfuscated original named every method `_lowerCamelCase`
    (so unittest never discovered any test and the helper call
    `self.check_results_dict_not_empty(...)` raised AttributeError) and bound
    every value to `_lowerCAmelCase` while referencing `MODEL_ID`, `config`,
    `benchmark`, `results`, ... (NameError). Canonical names are restored; the
    lost boolean benchmark-argument values are reconstructed to match what
    each test asserts (inference tests check inference results, train tests
    check train results) — confirm against the upstream test file.
    """

    def check_results_dict_not_empty(self, results):
        # Every (batch_size, sequence_length) combination must have a recorded result.
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['bs'], model_result['ss']):
                result = model_result['result'][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = 'sgugger/tiny-distilbert-classification'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == 'cpu', 'Cant do half precision')
    def test_inference_fp16(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            fp16=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == 'cpu', 'Can\'t do half precision')
    def test_train_no_configs_fp16(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = 'sshleifer/tinier_bart'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = 'sshleifer/tinier_bart'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, 'inf_time.csv'),
                train_memory_csv_file=os.path.join(tmp_dir, 'train_mem.csv'),
                inference_memory_csv_file=os.path.join(tmp_dir, 'inf_mem.csv'),
                train_time_csv_file=os.path.join(tmp_dir, 'train_time.csv'),
                env_info_csv_file=os.path.join(tmp_dir, 'env.csv'),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, 'inf_time.csv')).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, 'train_time.csv')).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, 'inf_mem.csv')).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, 'train_mem.csv')).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, 'env.csv')).exists())

    def test_trace_memory_line_by_line(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, 'sequential'))
            self.assertTrue(hasattr(summary, 'cumulative'))
            self.assertTrue(hasattr(summary, 'current'))
            self.assertTrue(hasattr(summary, 'total'))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, 'log.txt'),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, 'log.txt')).exists())
| 309 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Bound to the name `logger`, which `save_vocabulary` below references; the
# obfuscated original assigned it to the throwaway `SCREAMING_SNAKE_CASE__`.
logger = logging.get_logger(__name__)
# Module constants referenced by the tokenizer class below (the obfuscated
# original bound them all to `SCREAMING_SNAKE_CASE__`, shadowing one another
# and leaving the referenced names undefined).
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
    """BARThez tokenizer backed by SentencePiece.

    NOTE(review): the obfuscated original used the undefined base class
    `__lowerCamelCase`, named every method `__UpperCAmelCase` (so the
    `PreTrainedTokenizer` hook methods — `_tokenize`, `_convert_token_to_id`,
    `save_vocabulary`, ... — were never overridden), and in
    `convert_tokens_to_string` decoded the whole `tokens` argument instead of
    the accumulated `current_sub_tokens`. Canonical names and behavior are
    restored.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        self.fairseq_offset = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Build model inputs: `<s> A </s>` or `<s> A </s></s> B </s>`."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """BARThez does not use token types: every position gets 0."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token (str) to an id, preferring the fairseq special map."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # PieceToId returns 0 for unknown pieces; map that to the unk id.
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an id to a token (str), preferring the fairseq special map."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Join a token sequence back into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        # Drop the un-picklable SentencePiece processor; restored in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the sentencepiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 79 |
'''simple docstring'''
import os
import sys
import unittest
# NOTE(review): the obfuscated original bound every constant here to the same
# placeholder `__UpperCAmelCase`, so `git_repo_path` (used on the next line)
# was undefined and the PATH_TO_TRANSFORMERS override — despite the comment —
# was a plain local assignment with no effect on `check_dummies`. Canonical
# names and the module-attribute assignment are restored.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")


# Templates mirroring what `create_dummy_object` produces.
DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
class CheckDummiesTester(unittest.TestCase):
    """Tests for the `check_dummies` repo utility.

    NOTE(review): the obfuscated original named every method `lowerCamelCase_`
    (so each definition shadowed the previous one and unittest discovered no
    tests) and bound every intermediate value to `SCREAMING_SNAKE_CASE` while
    asserting on the original names (`objects`, `dummy_files`, ...), which
    raised NameError. Canonical names are restored; the expected multi-line
    strings follow the canonical `create_dummy_object`/`create_dummy_files`
    output — confirm against the repo's `utils/check_dummies.py`.
    """

    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 379 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
# Bound to `logger`, the name the conversion helpers below reference; the
# obfuscated original assigned it to `_A`, leaving `logger` undefined.
logger = logging.get_logger(__name__)
# fairseq → transformers parameter-name mapping; "*" is a layer-index
# wildcard filled in by `load_wavaveca_layer`. The obfuscated original bound
# both constants to `_A`, while the helpers reference `MAPPING` and
# `TOP_LEVEL_KEYS`.
MAPPING = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'adapter_layer': 'encoder.layers.*.adapter_layer',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'lm_head',
    'mask_emb': 'masked_spec_embed',
    'pooling_layer.linear': 'projector',
    'pooling_layer.projection': 'classifier',
}
# Keys that live at the top level of the HF model (no "wav2vec2." prefix).
TOP_LEVEL_KEYS = [
    'lm_head',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
    'projector',
    'classifier',
]
def read_txt_into_dict(filename):
    """Read a label file into an id2label dict.

    Each non-empty line contributes one entry mapping its 0-based line number
    to the line's first whitespace-separated word; empty lines are skipped
    (but still consume a line number).

    NOTE(review): the obfuscated original opened the undefined name
    `_lowerCamelCase` and bound key/value to throwaway locals without ever
    storing them into `result`; the bindings are restored.
    """
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign the fairseq tensor `value` onto the attribute path `key` of `hf_pointer`.

    `weight_type` selects which parameter of the resolved module is written
    ("weight" / "weight_g" / "weight_v" / "bias"); adapter parameters whose
    `full_name` ends with a PARAM_MAPPING key are written via the mapped
    sub-path ("param"); `None` writes the tensor onto the module itself.
    Raises ValueError on shape mismatch.

    NOTE(review): the obfuscated original declared five parameters all named
    `A__` (a SyntaxError) and replaced every `.data = value` assignment with a
    write to a throwaway local, so no weight was ever copied; both are restored.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def rename_dict(key, value, full_name, weight_type, hf_dict):
    """Store the fairseq tensor `value` into `hf_dict` under its HF state-dict key.

    Builds the full key from `key` plus the weight type (or the PARAM_MAPPING
    sub-path for adapter params). `lm_head` tensors are stored as-is; all
    others drop the leading dimension, mirroring the "param" path in
    `set_recursively`.

    NOTE(review): the obfuscated original had five parameters all named `A__`
    (SyntaxError) and never wrote the final value into `hf_dict`; restored.
    """
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
# fairseq adapter-parameter names → HF submodule parameter paths. Referenced
# as `PARAM_MAPPING` by `set_recursively`/`rename_dict`; the obfuscated
# original bound it to `_A`.
PARAM_MAPPING = {
    'W_a': 'linear_1.weight',
    'W_b': 'linear_2.weight',
    'b_a': 'linear_1.bias',
    'b_b': 'linear_2.bias',
    'ln_W': 'norm.weight',
    'ln_b': 'norm.bias',
}
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None):
    """Route one fairseq tensor into the HF model (or a plain state dict).

    Returns True iff `name` matched an entry of MAPPING. On a match the
    weight type is inferred from the fairseq name and the tensor is written
    either via `rename_dict` (when `hf_dict` is given) or `set_recursively`.

    NOTE(review): the obfuscated original lost the `is_used`/`mapped_key`
    bindings and flattened the loop so both `return is_used` statements sat at
    the same level; the early return inside the loop (stop at first match) is
    restored.
    """
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Copy every tensor of `fairseq_model`'s state dict into `hf_model`.

    Conv feature-extractor tensors go through `load_conv_layer`; everything
    else through `load_wavaveca_layer`. Unmatched tensor names are collected
    and logged as a warning. `is_headless` is accepted for call-site
    compatibility but not consulted here.

    NOTE(review): the obfuscated original bound every value to `snake_case`
    and referenced undefined names (`unused_weights`, `fairseq_dict`,
    `feature_extractor`, `is_used`); restored.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wavaveca.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one conv feature-extractor tensor into the HF feature extractor.

    The fairseq name encodes `conv_layers.<layer_id>.<type_id>.<param>`:
    type 0 is the conv itself, type 2 the layer norm (only for layer 0 when
    group norm is used). Anything else is recorded in `unused_weights`.
    Raises ValueError on shape mismatch.

    NOTE(review): the obfuscated original had five parameters all named `A__`
    (SyntaxError) and wrote each tensor to a throwaway local instead of the
    target parameter's `.data`; restored.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """Copy/paste/tweak a fairseq wav2vec2 checkpoint into the transformers design.

    Builds the right HF head (sequence classification / CTC / pretraining),
    optionally materializes tokenizer + feature-extractor artifacts from the
    fairseq dictionary, loads the fairseq model, transfers the weights and
    saves the result to `pytorch_dump_folder_path`.

    NOTE(review): the obfuscated original declared six parameters all named
    `A__` (SyntaxError) and bound every value to `snake_case` while
    referencing the original local names (`config`, `target_dict`,
    `vocab_dict`, `model`, ...); all bindings are restored.
    """
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1,
            sampling_rate=16000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = WavaVecaForCTC(config)
    else:
        hf_wav2vec = WavaVecaForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # NOTE(review): the obfuscated original bound the parser, parsed args and
    # the finetuned flag all to `_A`, so `parser`, `args` and `is_finetuned`
    # were undefined at use; restored.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
| 706 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module constants referenced by the tokenizer class below; the obfuscated
# original bound them all to `_A`, shadowing one another.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
# Module-level logger, conventionally named `logger` (the original bound it
# to the placeholder `_A`).
logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    """PEGASUS tokenizer backed by SentencePiece.

    NOTE(review): the obfuscated original subclassed the undefined name `A_`
    (should be the imported `PreTrainedTokenizer`) and bound all class
    attributes to the same placeholder `UpperCAmelCase__`, duplicating the
    first one; canonical attribute names are restored so the base class can
    pick them up.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__(self : Optional[Any] , _A : Any , _A : List[Any]="<pad>" , _A : int="</s>" , _A : Dict="<unk>" , _A : str="<mask_2>" , _A : Optional[int]="<mask_1>" , _A : Optional[Any]=None , _A : Tuple=1_0_3 , _A : Optional[Dict[str, Any]] = None , **_A : List[str] , ) -> None:
snake_case = offset
if additional_special_tokens is not None:
if not isinstance(_A , _A ):
raise TypeError(
f'additional_special_tokens should be of type {type(_A )}, but is'
f' {type(_A )}' )
snake_case = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'<unk_{i}>' for i in range(len(_A ) , self.offset - 1 )
]
if len(set(_A ) ) != len(_A ):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
snake_case = additional_special_tokens_extended
else:
snake_case = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'<unk_{i}>' for i in range(2 , self.offset )]
snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_A , unk_token=_A , mask_token=_A , pad_token=_A , mask_token_sent=_A , offset=_A , additional_special_tokens=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , )
snake_case = mask_token_sent
snake_case = vocab_file
snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
# add special tokens to encoder dict
snake_case = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
snake_case = {v: k for k, v in self.encoder.items()}
@property
def UpperCAmelCase(self : str ) -> int:
return len(self.sp_model ) + self.offset
def UpperCAmelCase(self : List[str] ) -> Dict[str, int]:
snake_case = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self : Any ) -> List[Any]:
snake_case = self.__dict__.copy()
snake_case = None
return state
def __setstate__(self : str , _A : Union[str, Any] ) -> Tuple:
snake_case = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case = {}
snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase(self : List[Any] , _A : str ) -> List[str]:
return self.sp_model.encode(_A , out_type=_A )
def UpperCAmelCase(self : List[str] , _A : str ) -> int:
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
snake_case = self.sp_model.piece_to_id(_A )
return sp_id + self.offset
def UpperCAmelCase(self : Union[str, Any] , _A : int ) -> str:
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
snake_case = self.sp_model.IdToPiece(index - self.offset )
return token
def UpperCAmelCase(self : List[Any] , _A : Tuple ) -> Tuple:
snake_case = []
snake_case = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_A ) + token
snake_case = []
else:
current_sub_tokens.append(_A )
out_string += self.sp_model.decode(_A )
return out_string.strip()
def UpperCAmelCase(self : List[Any] , _A : Tuple=False ) -> Tuple:
return 1
def UpperCAmelCase(self : Tuple , _A : Optional[int] ) -> Tuple:
snake_case = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def UpperCAmelCase(self : str , _A : List , _A : Optional[List] = None , _A : bool = False ) -> List[int]:
if already_has_special_tokens:
return self._special_token_mask(_A )
elif token_ids_a is None:
return self._special_token_mask(_A ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def UpperCAmelCase(self : int , _A : Dict , _A : List[Any]=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCAmelCase(self : Optional[Any] , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(_A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
snake_case = os.path.join(
_A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _A )
elif not os.path.isfile(self.vocab_file ):
with open(_A , "wb" ) as fi:
snake_case = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
| 294 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Restore distinct names: the mangled original bound the logger and the config
# archive map to the same `UpperCAmelCase_` name, losing the logger binding.
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class lowerCAmelCase__(PretrainedConfig):
    """Configuration class for a Speech2Text2 decoder model.

    Fixes over the mangled original: the base class `UpperCAmelCase__` did not
    exist (the file imports `PretrainedConfig`); the three class attributes all
    shared the name `__UpperCamelCase`, so only the last survived; every
    constructor parameter was named `lowercase_` (a SyntaxError); and the
    constructor bound its arguments to a throwaway local instead of `self`.
    """

    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        # the original assigned `decoder_layers` twice; the second binding is num_hidden_layers
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 512 |
"""simple docstring"""
def _A(__a) -> list[list]:
    """One recursive round of Gaussian elimination on an augmented matrix.

    Normalizes each row by its leading coefficient, subtracts the first row to
    cancel leading terms, then recurses on the reduced sub-matrix. Bug fix: in
    the mangled original every assignment targeted `SCREAMING_SNAKE_CASE_`
    while the code then read undefined locals (`magnitude`, `first_row`,
    `final_set`, `resultant`, ...), and the recursive call targeted `simplify`,
    which the colliding `_A` names never defined.
    """
    return simplify(__a)


def simplify(current_set: list[list]) -> list[list]:
    """Worker for `_A`; see its docstring. Named so the recursive call and
    `solve_simultaneous` resolve."""
    # Divide each row by the magnitude of its first term --> creating a 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract the first row from every other row to cancel its leading term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        mod_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, mod_row)
        final_set = resultant
    return final_set
def solve_simultaneous(equations: list[list]) -> list:
    """Solve n simultaneous linear equations given as n rows of n+1 numbers.

    Validates the input, moves a zero-free pivot row to the front if needed,
    triangularizes via `simplify`, then back-substitutes. Returns the solution
    values rounded to 5 decimal places. Bug fixes over the mangled original:
    the function shadowed `simplify` (both were named `_A`, so the call below
    and the `__main__` block resolved to nothing), and every local
    (`_length`, `data_set`, `full_row`, `solutions`, ...) was assigned to a
    throwaway name and then read undefined.

    Raises:
        IndexError: empty input or rows of the wrong length.
        ValueError: non-numeric entries, or no equation free of zeros.
    """
    if len(equations) == 0:
        raise IndexError('solve_simultaneous() requires n lists of length n+1')
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError('solve_simultaneous() requires n lists of length n+1')
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError('solve_simultaneous() requires lists of integers')
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        # move one equation with no zero coefficients to the front as the pivot
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError('solve_simultaneous() requires at least 1 full equation')
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    # back-substitution from the last (shortest) row upward
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Bug fix: the demo matrix was assigned to a mangled name while the calls
    # below read `eq`, which was undefined.
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
| 512 | 1 |
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    """Compare two ONNX TensorProtos for equality while ignoring their names.

    Temporarily blanks both names, compares, then restores them. Bug fixes:
    the mangled original declared both parameters as `snake_case` (a
    SyntaxError), dropped the saved-name assignments, and collided with four
    other functions all named `_lowerCamelCase`; the restored name matches the
    call site in `remove_dup_initializers`.
    """
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    """Rewrite every input of `node_proto` equal to `name` to `new_name`,
    recursing into If/Loop subgraphs.

    Restored the real name (it is called by `_graph_replace_input_with`) and
    the parameter names; the mangled original declared three parameters all
    named `snake_case`, a SyntaxError.
    """
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            # protobuf repeated fields have no item assignment: insert the new
            # value, then pop the old one that shifted to i + 1
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    """Apply `_node_replace_input_with` to every node of an ONNX graph.

    Restored name and parameters (the mangled original had three `snake_case`
    parameters — a SyntaxError — and collided with its siblings).
    """
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """Drop duplicate initializers and rewire their consumers.

    `ind_to_replace` holds (dup_idx, kept_idx) pairs with dup_idx > kept_idx.
    Restored name/parameters from the mangled original (three `snake_case`
    parameters — a SyntaxError — and throwaway assignment targets).
    """
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        # remove the duplicate, then point every node input at the kept tensor
        model_without_ext.graph.initializer.remove(inits[i])
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """Deduplicate identical initializer tensors in an ONNX model file.

    Loads the model, finds pairwise-equal initializers, removes the duplicates
    and rewires their consumers, then saves the result as `optimized_<name>`
    next to the input file. Returns the new model path. All local names are
    restored from the mangled original, whose assignments all targeted
    `_lowerCAmelCase` and whose reads were therefore undefined.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                # estimate the memory saved by dropping inits[j]
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('unexpected data type: ', dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print('total reduced size: ', total_reduced_size / 1_024 / 1_024 / 1_024, 'GB')
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = 'optimized_' + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)
    return new_model_path
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger. The functions below log through `logger`, but the
# mangled original bound it to `_lowercase`, leaving `logger` undefined.
logger = logging.get_logger(__name__)
def load_tfa_weights_in_bert(model, tf_checkpoint_path, config):
    """Load a TensorFlow 2.x BERT checkpoint into a PyTorch `BertModel`.

    Restored the name the converter below calls (`load_tfa_weights_in_bert`)
    and all local names; the mangled original declared three parameters all
    named `snake_case` (a SyntaxError) and assigned every intermediate to a
    throwaway name before reading undefined locals.

    Args:
        model: the target `BertModel` whose parameters are overwritten.
        tf_checkpoint_path: path to the TF2 checkpoint prefix.
        config: `BertConfig` used to map layer numbers to modules.
    Returns:
        The same `model`, with weights loaded.
    """
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f'Converting TensorFlow checkpoint from {tf_path}')
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split('/')
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f'Skipping non-model layer {full_name}')
            continue
        if "optimizer" in full_name:
            logger.info(f'Skipping optimization layer {full_name}')
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith('layer_with_weights'):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append('/'.join(name))
        arrays.append(array)
    logger.info(f'Read a total of {len(arrays):,} layers')
    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f'Found layer names with different depths (layer depth {list(set(layer_depth))})')
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            'The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'
            ' heads.')
    # convert layers
    logger.info('Converting weights...')
    for full_name, array in zip(names, arrays):
        name = full_name.split('/')
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith('layer_with_weights'):
                layer_num = int(m_name.split('-')[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(['embeddings', 'LayerNorm'])
                    pointer = getattr(pointer, 'embeddings')
                    pointer = getattr(pointer, 'LayerNorm')
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(['encoder', 'layer', str(layer_num - 4)])
                    pointer = getattr(pointer, 'encoder')
                    pointer = getattr(pointer, 'layer')
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(['pooler', 'dense'])
                    pointer = getattr(pointer, 'pooler')
                    pointer = getattr(pointer, 'dense')
            elif m_name == "embeddings":
                trace.append('embeddings')
                pointer = getattr(pointer, 'embeddings')
                # `layer_num` is carried over from the preceding layer_with_weights entry
                if layer_num == 0:
                    trace.append('word_embeddings')
                    pointer = getattr(pointer, 'word_embeddings')
                elif layer_num == 1:
                    trace.append('position_embeddings')
                    pointer = getattr(pointer, 'position_embeddings')
                elif layer_num == 2:
                    trace.append('token_type_embeddings')
                    pointer = getattr(pointer, 'token_type_embeddings')
                else:
                    raise ValueError(f'Unknown embedding layer with name {full_name}')
                trace.append('weight')
                pointer = getattr(pointer, 'weight')
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(['attention', 'self'])
                pointer = getattr(pointer, 'attention')
                pointer = getattr(pointer, 'self')
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(['attention', 'output', 'LayerNorm'])
                pointer = getattr(pointer, 'attention')
                pointer = getattr(pointer, 'output')
                pointer = getattr(pointer, 'LayerNorm')
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(['attention', 'output', 'dense'])
                pointer = getattr(pointer, 'attention')
                pointer = getattr(pointer, 'output')
                pointer = getattr(pointer, 'dense')
            elif m_name == "_output_dense":
                # output dense
                trace.extend(['output', 'dense'])
                pointer = getattr(pointer, 'output')
                pointer = getattr(pointer, 'dense')
            elif m_name == "_output_layer_norm":
                # output dense
                trace.extend(['output', 'LayerNorm'])
                pointer = getattr(pointer, 'output')
                pointer = getattr(pointer, 'LayerNorm')
            elif m_name == "_key_dense":
                # attention key
                trace.append('key')
                pointer = getattr(pointer, 'key')
            elif m_name == "_query_dense":
                # attention query
                trace.append('query')
                pointer = getattr(pointer, 'query')
            elif m_name == "_value_dense":
                # attention value
                trace.append('value')
                pointer = getattr(pointer, 'value')
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(['intermediate', 'dense'])
                pointer = getattr(pointer, 'intermediate')
                pointer = getattr(pointer, 'dense')
            elif m_name == "_output_layer_norm":
                # output layer norm — NOTE(review): unreachable (shadowed by the
                # identical test above); kept byte-for-byte from the original.
                trace.append('output')
                pointer = getattr(pointer, 'output')
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append('bias')
                pointer = getattr(pointer, 'bias')
            elif m_name in ["kernel", "gamma"]:
                trace.append('weight')
                pointer = getattr(pointer, 'weight')
            else:
                logger.warning(f'Ignored {m_name}')
        # for certain layers reshape is necessary
        trace = '.'.join(trace)
        if re.match(R'(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)', trace) or re.match(
            R'(\S+)\.attention\.output\.dense\.weight', trace):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f'Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'
                f' {array.shape}')
        logger.info(f'Successfully set variable {full_name} to PyTorch layer {trace}')
    return model
def convert_tfa_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    """Convert a TF 2.x BERT checkpoint to a PyTorch state dict on disk.

    Restored the name the `__main__` block calls and the real parameter names;
    the mangled original declared all three parameters as `snake_case`
    (a SyntaxError) and bound intermediates to a throwaway local.
    """
    # Instantiate model
    logger.info(f'Loading model based on config from {config_path}...')
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)
    # Load weights from checkpoint
    logger.info(f'Loading weights from checkpoint {tf_checkpoint_path}...')
    load_tfa_weights_in_bert(model, tf_checkpoint_path, config)
    # Save pytorch-model
    logger.info(f'Saving PyTorch model to {pytorch_dump_path}...')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # Bug fix: the parser and parsed args were assigned to the mangled name
    # `_lowercase` while the code below read undefined `parser`/`args`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.'
    )
    parser.add_argument(
        '--bert_config_file',
        type=str,
        required=True,
        help='The config json file corresponding to the BERT model. This specifies the model architecture.',
    )
    parser.add_argument(
        '--pytorch_dump_path',
        type=str,
        required=True,
        help='Path to the output PyTorch model (must include filename).',
    )
    args = parser.parse_args()
    convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 225 | 1 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Restore distinct names: the mangled original reassigned one name for both
# the logger and this archive map, so the config class's `logger.info` call
# would have hit an undefined/clobbered binding.
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    """Configuration class for the DETR object-detection model.

    Fixes over the mangled original: the class name `a_` collided with other
    classes in this file; the base class `lowerCamelCase` did not exist
    (`PretrainedConfig` is imported at the top); three class attributes shared
    the name `lowercase`, so only the last survived; every `__init__`
    parameter was `_SCREAMING_SNAKE_CASE` (a SyntaxError) and arguments were
    bound to a throwaway local instead of `self`; the two properties, the
    classmethod and `to_dict` all collided on the name `A__`.
    """

    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        # the original assigned `encoder_layers` twice; the second binding is num_hidden_layers
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config, **kwargs):
        """Instantiate a config from a pre-trained backbone model configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    """ONNX export configuration for DETR.

    Fixes over the mangled original: the class name `a_` collided with the
    config class above; the base class `lowerCamelCase` did not exist (the
    file imports `OnnxConfig`); and all three properties shared the colliding
    name `A__` — restore the names the ONNX exporter looks up.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 301 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    """Tests for `MgpstrProcessor` (MGP-STR char tokenizer + ViT image processor).

    Fixes over the mangled original: the class name `a_` collided with other
    classes in this file; every method was named `A__`, so only the last
    definition survived and unittest could discover no tests; and every local
    assignment targeted the single name `UpperCamelCase`, leaving the reads
    undefined.
    """

    image_processor_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        """Write a char vocab and an image-processor config into a temp dir."""
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create one random PIL image for the image-processor tests."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predictions = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predictions)
        decoded_tok = tokenizer.batch_decode(predictions)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]
        self.assertListEqual(decode_strs, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)
        results = processor.batch_decode([char_input, bpe_input, wp_input])
        self.assertListEqual(
            list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"]
        )
| 301 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

# Fix: every constant below was bound to the same mangled name
# (SCREAMING_SNAKE_CASE_) while the tokenizer class references the real names,
# so the module raised NameError. Also repaired two corrupted URLs
# ("/aresolve/" and "tokenizer.jsont").
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt"
        ),
        "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
        "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-openqa": (
            "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-reader": (
            "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-openqa": (
            "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-reader": (
            "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input length (in tokens) supported by each checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/realm-cc-news-pretrained-embedder": 512,
    "google/realm-cc-news-pretrained-encoder": 512,
    "google/realm-cc-news-pretrained-scorer": 512,
    "google/realm-cc-news-pretrained-openqa": 512,
    "google/realm-orqa-nq-openqa": 512,
    "google/realm-orqa-nq-reader": 512,
    "google/realm-orqa-wq-openqa": 512,
    "google/realm-orqa-wq-reader": 512,
}

# Per-checkpoint tokenizer init overrides.
PRETRAINED_INIT_CONFIGURATION = {
    "google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-reader": {"do_lower_case": True},
    "google/realm-orqa-wq-openqa": {"do_lower_case": True},
    "google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class UpperCamelCase__(PreTrainedTokenizerFast):
    """Fast (Rust-backed) REALM tokenizer, a WordPiece tokenizer identical to
    BertTokenizerFast plus ``batch_encode_candidates`` for scoring candidates.

    Fixes vs. the original block: the base class and every local/attribute
    assignment target were mangled to placeholder names (NameError at class
    creation / call time), all five methods shared one mangled name so only
    the last survived, and ``getattr`` for the normalizer class was called on
    the wrong object instead of the ``normalizers`` module.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # If the serialized normalizer disagrees with the requested options,
        # rebuild it with the caller's settings.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        """Encode a batch of candidate texts (each item is a list of candidate
        strings), padding every candidate to ``max_length``.

        Returns a BatchEncoding with per-example lists of encoded candidates;
        empty fields are dropped.
        """
        # Candidates must all be padded to the same length to be stackable.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH

        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        # Tensor conversion happens once at the end, not per candidate.
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]

        if token_ids_a_a:
            output += token_ids_a_a + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: 0 for the first sequence (incl. specials),
        1 for the optional second sequence."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_a + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Persist the backend tokenizer's model files to *save_directory*."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 718 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class UpperCamelCase__(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """Model tests for ``VQModel``.

    Fixes vs. the original block: the mixin base classes were mangled to an
    undefined placeholder, locals were all bound to one mangled name (so the
    bodies raised NameError), and a dummy input was moved to an undefined
    device instead of ``torch_device``.
    """

    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        # A single random image batch on the test device.
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        """Return (config kwargs, forward kwargs) for the shared mixin tests."""
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        # Not applicable to VQModel; intentionally skipped.
        pass

    def test_training(self):
        # Not applicable to VQModel; intentionally skipped.
        pass

    def test_from_pretrained_hub(self):
        """Loading the dummy hub checkpoint must report no missing keys and
        produce a non-None forward output."""
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        """Forward pass of the pretrained dummy model must reproduce the
        recorded output slice."""
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        # Seed everything for a deterministic forward pass.
        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 116 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class __lowerCAmelCase(PretrainedConfig):
    """Configuration for a ViT-MAE model (encoder + lightweight decoder).

    Fixes vs. the original block: the base class was an undefined placeholder,
    every ``__init__`` parameter shared one mangled name (a SyntaxError), and
    no attribute was ever stored on ``self``.
    """

    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Encoder hyperparameters.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Patch-embedding / input geometry.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        # Decoder hyperparameters.
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        # Pretraining objective settings.
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 98 |
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    # Count token occurrences in a binarized MLM dataset and dump the
    # per-token-id counts used to smooth masking probabilities.
    # Fix: the original bound every value to one mangled name ("lowercase"),
    # so `parser`, `args`, `data`, `counter` and `counts` were all undefined.
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    # Dense count vector indexed by token id (ids absent from the data stay 0).
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 116 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class __lowerCamelCase(PretrainedConfig):
    """Configuration for a CTRL model.

    Fixes vs. the original block: the base class was an undefined placeholder,
    every ``__init__`` parameter shared one mangled name (a SyntaxError), no
    attribute was stored on ``self``, and the module constants were both bound
    to the same mangled name.
    """

    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map canonical config attribute names onto CTRL's historical field names.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff  # feed-forward inner dimension
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
"""simple docstring"""
from typing import Any
def A_(_lowerCAmelCase: list) -> list:
    """Return the statistical mode(s) of *_lowerCAmelCase* as a sorted list.

    Every value whose occurrence count equals the maximum count is returned;
    an empty input yields an empty list.

    Fix: the original body referenced undefined names (``input_list``, ``y``)
    because the local bindings had been mangled, so any call raised NameError.

    >>> A_([2, 3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 2, 2, 2])
    [2]
    >>> A_([1, 1, 2, 2])
    [1, 2]
    >>> A_([])
    []
    """
    if not _lowerCAmelCase:
        return []
    # Occurrence count of each element, positionally aligned with the input.
    counts = [_lowerCAmelCase.count(value) for value in _lowerCAmelCase]
    max_count = max(counts)  # highest frequency present
    # Set comprehension dedupes; sorted() gives a deterministic order.
    return sorted({_lowerCAmelCase[i] for i, count in enumerate(counts) if count == max_count})
if __name__ == "__main__":
    # Run the docstring examples above as tests when executed directly.
    # Fix: stripped dataset-table residue ("| 285 | 1 |") fused onto the
    # final line, which made the statement a SyntaxError.
    import doctest

    doctest.testmod()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.