code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( a , unittest.TestCase ):
lowercase__ : Tuple = OpenAIGPTTokenizer
lowercase__ : Any = OpenAIGPTTokenizerFast
lowercase__ : Tuple = True
lowercase__ : str = False
def __snake_case( self : List[Any] ) -> Any:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
SCREAMING_SNAKE_CASE = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase ) ) ) )
SCREAMING_SNAKE_CASE = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" ) as fp:
fp.write(json.dumps(_UpperCamelCase ) )
with open(self.merges_file , "w" ) as fp:
fp.write("\n".join(_UpperCamelCase ) )
def __snake_case( self : Optional[Any] , _UpperCamelCase : Any ) -> Tuple:
'''simple docstring'''
return "lower newer", "lower newer"
def __snake_case( self : Optional[int] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
SCREAMING_SNAKE_CASE = "lower"
SCREAMING_SNAKE_CASE = ["low", "er</w>"]
SCREAMING_SNAKE_CASE = tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = tokens + ["<unk>"]
SCREAMING_SNAKE_CASE = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , _UpperCamelCase )
def __snake_case( self : List[str] , _UpperCamelCase : Optional[int]=15 ) -> Union[str, Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
# Simple input
SCREAMING_SNAKE_CASE = "This is a simple input"
SCREAMING_SNAKE_CASE = ["This is a simple input 1", "This is a simple input 2"]
SCREAMING_SNAKE_CASE = ("This is a simple input", "This is a pair")
SCREAMING_SNAKE_CASE = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(_UpperCamelCase , tokenizer_r.encode , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Simple input
self.assertRaises(_UpperCamelCase , tokenizer_r.encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Simple input
self.assertRaises(
_UpperCamelCase , tokenizer_r.batch_encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" , )
# Pair input
self.assertRaises(_UpperCamelCase , tokenizer_r.encode , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Pair input
self.assertRaises(_UpperCamelCase , tokenizer_r.encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Pair input
self.assertRaises(
_UpperCamelCase , tokenizer_r.batch_encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" , )
def __snake_case( self : Tuple ) -> List[Any]:
'''simple docstring'''
pass
@require_ftfy
@require_spacy
@require_tokenizers
class lowercase ( a ):
pass
| 403 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : int = logging.get_logger(__name__)
_lowerCamelCase : int = {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json''',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class lowercase ( a ):
lowercase__ : List[str] = """realm"""
def __init__( self : Dict , _UpperCamelCase : Optional[int]=30_522 , _UpperCamelCase : Union[str, Any]=768 , _UpperCamelCase : str=128 , _UpperCamelCase : List[str]=12 , _UpperCamelCase : Any=12 , _UpperCamelCase : Union[str, Any]=8 , _UpperCamelCase : str=3_072 , _UpperCamelCase : Tuple="gelu_new" , _UpperCamelCase : Any=0.1 , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : Optional[Any]=512 , _UpperCamelCase : Optional[Any]=2 , _UpperCamelCase : Optional[int]=0.0_2 , _UpperCamelCase : int=1e-12 , _UpperCamelCase : Optional[int]=256 , _UpperCamelCase : Any=10 , _UpperCamelCase : Optional[int]=1e-3 , _UpperCamelCase : List[str]=5 , _UpperCamelCase : Any=320 , _UpperCamelCase : Union[str, Any]=13_353_718 , _UpperCamelCase : Optional[Any]=5_000 , _UpperCamelCase : Dict=1 , _UpperCamelCase : List[str]=0 , _UpperCamelCase : str=2 , **_UpperCamelCase : Optional[int] , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
# Common config
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = retriever_proj_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = num_candidates
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = layer_norm_eps
# Reader config
SCREAMING_SNAKE_CASE = span_hidden_size
SCREAMING_SNAKE_CASE = max_span_width
SCREAMING_SNAKE_CASE = reader_layer_norm_eps
SCREAMING_SNAKE_CASE = reader_beam_size
SCREAMING_SNAKE_CASE = reader_seq_len
# Retrieval config
SCREAMING_SNAKE_CASE = num_block_records
SCREAMING_SNAKE_CASE = searcher_beam_size
| 403 | 1 |
"""simple docstring"""
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class _lowerCamelCase (__lowerCamelCase , unittest.TestCase ):
_snake_case = DownBlockaD # noqa F405
_snake_case = "down"
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
_lowercase : Optional[int] = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(lowerCamelCase_ )
class _lowerCamelCase (__lowerCamelCase , unittest.TestCase ):
_snake_case = ResnetDownsampleBlockaD # noqa F405
_snake_case = "down"
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_lowercase : Tuple = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(lowerCamelCase_ )
class _lowerCamelCase (__lowerCamelCase , unittest.TestCase ):
_snake_case = AttnDownBlockaD # noqa F405
_snake_case = "down"
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
_lowercase : str = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(lowerCamelCase_ )
class _lowerCamelCase (__lowerCamelCase , unittest.TestCase ):
_snake_case = CrossAttnDownBlockaD # noqa F405
_snake_case = "down"
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
_lowercase : Optional[Any] = super().prepare_init_args_and_inputs_for_common()
_lowercase : List[Any] = 3_2
return init_dict, inputs_dict
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_lowercase : Any = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(lowerCamelCase_ )
class _lowerCamelCase (__lowerCamelCase , unittest.TestCase ):
_snake_case = SimpleCrossAttnDownBlockaD # noqa F405
_snake_case = "down"
@property
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=lowerCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_lowercase : Optional[int] = super().prepare_init_args_and_inputs_for_common()
_lowercase : Dict = 3_2
return init_dict, inputs_dict
@unittest.skipIf(torch_device == 'mps' , 'MPS result is not consistent' )
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
_lowercase : Union[str, Any] = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(lowerCamelCase_ )
class _lowerCamelCase (__lowerCamelCase , unittest.TestCase ):
_snake_case = SkipDownBlockaD # noqa F405
_snake_case = "down"
@property
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=lowerCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
_lowercase : int = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(lowerCamelCase_ )
class _lowerCamelCase (__lowerCamelCase , unittest.TestCase ):
_snake_case = AttnSkipDownBlockaD # noqa F405
_snake_case = "down"
@property
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=lowerCamelCase_ )
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
_lowercase : int = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(lowerCamelCase_ )
class _lowerCamelCase (__lowerCamelCase , unittest.TestCase ):
_snake_case = DownEncoderBlockaD # noqa F405
_snake_case = "down"
@property
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
return super().get_dummy_input(include_temb=lowerCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_lowercase : Dict = {
'in_channels': 3_2,
'out_channels': 3_2,
}
_lowercase : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
_lowercase : Any = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(lowerCamelCase_ )
class _lowerCamelCase (__lowerCamelCase , unittest.TestCase ):
_snake_case = AttnDownEncoderBlockaD # noqa F405
_snake_case = "down"
@property
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
return super().get_dummy_input(include_temb=lowerCamelCase_ )
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
_lowercase : List[str] = {
'in_channels': 3_2,
'out_channels': 3_2,
}
_lowercase : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
_lowercase : Any = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(lowerCamelCase_ )
class _lowerCamelCase (__lowerCamelCase , unittest.TestCase ):
_snake_case = UNetMidBlockaD # noqa F405
_snake_case = "mid"
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_lowercase : List[Any] = {
'in_channels': 3_2,
'temb_channels': 1_2_8,
}
_lowercase : Any = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
_lowercase : Union[str, Any] = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(lowerCamelCase_ )
class _lowerCamelCase (__lowerCamelCase , unittest.TestCase ):
_snake_case = UNetMidBlockaDCrossAttn # noqa F405
_snake_case = "mid"
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
_lowercase : List[str] = super().prepare_init_args_and_inputs_for_common()
_lowercase : Optional[Any] = 3_2
return init_dict, inputs_dict
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
_lowercase : List[Any] = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(lowerCamelCase_ )
class _lowerCamelCase (__lowerCamelCase , unittest.TestCase ):
_snake_case = UNetMidBlockaDSimpleCrossAttn # noqa F405
_snake_case = "mid"
@property
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=lowerCamelCase_ )
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
_lowercase : Dict = super().prepare_init_args_and_inputs_for_common()
_lowercase : Union[str, Any] = 3_2
return init_dict, inputs_dict
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
_lowercase : Union[str, Any] = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(lowerCamelCase_ )
class _lowerCamelCase (__lowerCamelCase , unittest.TestCase ):
_snake_case = UpBlockaD # noqa F405
_snake_case = "up"
@property
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCamelCase_ )
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_lowercase : Optional[Any] = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(lowerCamelCase_ )
class _lowerCamelCase (__lowerCamelCase , unittest.TestCase ):
_snake_case = ResnetUpsampleBlockaD # noqa F405
_snake_case = "up"
@property
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCamelCase_ )
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
_lowercase : Dict = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(lowerCamelCase_ )
class _lowerCamelCase (__lowerCamelCase , unittest.TestCase ):
_snake_case = CrossAttnUpBlockaD # noqa F405
_snake_case = "up"
@property
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCamelCase_ )
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
_lowercase : Optional[int] = super().prepare_init_args_and_inputs_for_common()
_lowercase : Optional[Any] = 3_2
return init_dict, inputs_dict
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
_lowercase : Any = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(lowerCamelCase_ )
class _lowerCamelCase (__lowerCamelCase , unittest.TestCase ):
_snake_case = SimpleCrossAttnUpBlockaD # noqa F405
_snake_case = "up"
@property
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCamelCase_ , include_encoder_hidden_states=lowerCamelCase_ )
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
_lowercase : Any = super().prepare_init_args_and_inputs_for_common()
_lowercase : Optional[Any] = 3_2
return init_dict, inputs_dict
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
_lowercase : List[Any] = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(lowerCamelCase_ )
class _lowerCamelCase (__lowerCamelCase , unittest.TestCase ):
_snake_case = AttnUpBlockaD # noqa F405
_snake_case = "up"
@property
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCamelCase_ )
@unittest.skipIf(torch_device == 'mps' , 'MPS result is not consistent' )
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
_lowercase : int = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(lowerCamelCase_ )
class _lowerCamelCase (__lowerCamelCase , unittest.TestCase ):
_snake_case = SkipUpBlockaD # noqa F405
_snake_case = "up"
@property
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
_lowercase : str = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(lowerCamelCase_ )
class _lowerCamelCase (__lowerCamelCase , unittest.TestCase ):
_snake_case = AttnSkipUpBlockaD # noqa F405
_snake_case = "up"
@property
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCamelCase_ )
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
_lowercase : Tuple = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(lowerCamelCase_ )
class _lowerCamelCase (__lowerCamelCase , unittest.TestCase ):
_snake_case = UpDecoderBlockaD # noqa F405
_snake_case = "up"
@property
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
return super().get_dummy_input(include_temb=lowerCamelCase_ )
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
_lowercase : Dict = {'in_channels': 3_2, 'out_channels': 3_2}
_lowercase : Dict = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_lowercase : Dict = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(lowerCamelCase_ )
class _lowerCamelCase (__lowerCamelCase , unittest.TestCase ):
_snake_case = AttnUpDecoderBlockaD # noqa F405
_snake_case = "up"
@property
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
return super().get_dummy_input(include_temb=lowerCamelCase_ )
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
_lowercase : int = {'in_channels': 3_2, 'out_channels': 3_2}
_lowercase : Tuple = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
_lowercase : List[Any] = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(lowerCamelCase_ )
| 715 | """simple docstring"""
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def __lowerCAmelCase( __UpperCAmelCase ):
"""simple docstring"""
_lowercase : Optional[int] = int(__UpperCAmelCase )
_lowercase , _lowercase , _lowercase : Union[str, Any] = t // 3_600, (t // 60) % 60, t % 60
return F'''{h}:{m:02d}:{s:02d}''' if h != 0 else F'''{m:02d}:{s:02d}'''
def __lowerCAmelCase( __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=300 ):
"""simple docstring"""
return F'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def __lowerCAmelCase( __UpperCAmelCase ):
"""simple docstring"""
_lowercase : List[Any] = '<table border="1" class="dataframe">\n'
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += F''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
_lowercase : Optional[Any] = F'''{elt:.6f}''' if isinstance(__UpperCAmelCase ,__UpperCAmelCase ) else str(__UpperCAmelCase )
html_code += F''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class _lowerCamelCase :
_snake_case = 5
_snake_case = 0.2
def __init__( self : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : bool = True , lowerCamelCase_ : Optional["NotebookTrainingTracker"] = None , lowerCamelCase_ : int = 3_0_0 , ):
"""simple docstring"""
_lowercase : int = total
_lowercase : Dict = '' if prefix is None else prefix
_lowercase : List[Any] = leave
_lowercase : Dict = parent
_lowercase : str = width
_lowercase : Any = None
_lowercase : Optional[int] = None
_lowercase : Union[str, Any] = None
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : bool = False , lowerCamelCase_ : str = None ):
"""simple docstring"""
_lowercase : Dict = value
if comment is not None:
_lowercase : List[str] = comment
if self.last_value is None:
_lowercase : List[Any] = time.time()
_lowercase : str = value
_lowercase : Any = None
_lowercase : List[Any] = self.warmup
_lowercase : Union[str, Any] = 1
self.update_bar(lowerCamelCase_ )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
_lowercase : Tuple = time.time()
_lowercase : List[str] = current_time - self.start_time
# We could have value = self.start_value if the update is called twixe with the same start value.
if value > self.start_value:
_lowercase : List[Any] = self.elapsed_time / (value - self.start_value)
else:
_lowercase : List[Any] = None
if value >= self.total:
_lowercase : Optional[Any] = self.total
_lowercase : str = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
_lowercase : int = self.average_time_per_item * (self.total - value)
self.update_bar(lowerCamelCase_ )
_lowercase : Optional[int] = value
_lowercase : Dict = current_time
if self.average_time_per_item is None:
_lowercase : Union[str, Any] = 1
else:
_lowercase : Optional[Any] = max(int(self.update_every / self.average_time_per_item ) , 1 )
def __UpperCAmelCase ( self : str , lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any]=None ):
"""simple docstring"""
_lowercase : str = ' ' * (len(str(self.total ) ) - len(str(lowerCamelCase_ ) )) + str(lowerCamelCase_ )
if self.elapsed_time is None:
_lowercase : Dict = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
_lowercase : Optional[int] = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
_lowercase : int = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
_lowercase : int = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
_lowercase : List[str] = disp.display(disp.HTML(self.html_code ) , display_id=lowerCamelCase_ )
else:
self.output.update(disp.HTML(self.html_code ) )
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('' ) )
class _lowerCamelCase (__lowerCamelCase ):
def __init__( self : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple=None ):
"""simple docstring"""
super().__init__(lowerCamelCase_ )
_lowercase : Tuple = None if column_names is None else [column_names]
_lowercase : Optional[int] = None
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_lowercase : Union[str, Any] = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
_lowercase : Dict = disp.display(disp.HTML(self.html_code ) , display_id=lowerCamelCase_ )
else:
self.output.update(disp.HTML(self.html_code ) )
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Optional[int] ):
"""simple docstring"""
if self.inner_table is None:
_lowercase : Optional[int] = [list(values.keys() ), list(values.values() )]
else:
_lowercase : str = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(lowerCamelCase_ )
_lowercase : List[Any] = columns
self.inner_table.append([values[c] for c in columns] )
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : int=3_0_0 ):
"""simple docstring"""
_lowercase : List[str] = NotebookProgressBar(lowerCamelCase_ , prefix=lowerCamelCase_ , parent=self , width=lowerCamelCase_ )
return self.child_bar
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
_lowercase : Any = None
self.display()
class _lowerCamelCase (__lowerCamelCase ):
def __init__( self : Optional[Any] ):
"""simple docstring"""
_lowercase : int = None
_lowercase : Any = None
_lowercase : Optional[int] = False
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : List[Any] , **lowerCamelCase_ : Union[str, Any] ):
"""simple docstring"""
_lowercase : Any = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
_lowercase : Dict = 0
_lowercase : Optional[Any] = 0
_lowercase : Union[str, Any] = [self.first_column] + ['Training Loss']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('Validation Loss' )
_lowercase : Tuple = NotebookTrainingTracker(state.max_steps , lowerCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : Dict ):
"""simple docstring"""
_lowercase : Optional[Any] = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
_lowercase : Tuple = False
def __UpperCAmelCase ( self : int , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[str, Any]=None , **lowerCamelCase_ : str ):
"""simple docstring"""
if not has_length(lowerCamelCase_ ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
_lowercase : Tuple = self.training_tracker.add_child(len(lowerCamelCase_ ) )
else:
_lowercase : Optional[Any] = NotebookProgressBar(len(lowerCamelCase_ ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict , **lowerCamelCase_ : List[str] ):
"""simple docstring"""
if self.prediction_bar is not None:
self.prediction_bar.close()
_lowercase : Union[str, Any] = None
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : str , lowerCamelCase_ : int=None , **lowerCamelCase_ : List[Any] ):
"""simple docstring"""
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
_lowercase : Any = {'Training Loss': logs['loss']}
# First column is necessarily Step sine we're not in epoch eval strategy
_lowercase : Any = state.global_step
self.training_tracker.write_line(lowerCamelCase_ )
def __UpperCAmelCase ( self : int , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : List[str] , lowerCamelCase_ : str=None , **lowerCamelCase_ : List[Any] ):
"""simple docstring"""
if self.training_tracker is not None:
_lowercase : List[str] = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
for log in reversed(state.log_history ):
if "loss" in log:
_lowercase : Union[str, Any] = log['loss']
break
if self.first_column == "Epoch":
_lowercase : Optional[int] = int(state.epoch )
else:
_lowercase : Optional[Any] = state.global_step
_lowercase : int = 'eval'
for k in metrics:
if k.endswith('_loss' ):
_lowercase : Tuple = re.sub(r'\_loss$' , '' , lowerCamelCase_ )
_lowercase : Optional[Any] = metrics.pop('total_flos' , lowerCamelCase_ )
_lowercase : List[str] = metrics.pop('epoch' , lowerCamelCase_ )
_lowercase : List[Any] = metrics.pop(F'''{metric_key_prefix}_runtime''' , lowerCamelCase_ )
_lowercase : Tuple = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , lowerCamelCase_ )
_lowercase : Any = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , lowerCamelCase_ )
_lowercase : Union[str, Any] = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , lowerCamelCase_ )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
_lowercase : Optional[Any] = v
else:
_lowercase : Tuple = k.split('_' )
_lowercase : List[Any] = ' '.join([part.capitalize() for part in splits[1:]] )
_lowercase : List[Any] = v
self.training_tracker.write_line(lowerCamelCase_ )
self.training_tracker.remove_child()
_lowercase : Any = None
# Evaluation takes a long time so we should force the next update.
_lowercase : Dict = True
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : Any ):
"""simple docstring"""
self.training_tracker.update(
state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=lowerCamelCase_ )
_lowercase : List[Any] = None
| 283 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Names importable without any heavy backend: configuration objects only.
# (Previously this dict was bound to a throwaway name while `_LazyModule`
# received an undefined `_import_structure` -- fixed.)
_import_structure = {
    'configuration_groupvit': [
        'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'GroupViTConfig',
        'GroupViTOnnxConfig',
        'GroupViTTextConfig',
        'GroupViTVisionConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch models are only registered when torch is installed.
    _import_structure['modeling_groupvit'] = [
        'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GroupViTModel',
        'GroupViTPreTrainedModel',
        'GroupViTTextModel',
        'GroupViTVisionModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow models are only registered when TF is installed.
    _import_structure['modeling_tf_groupvit'] = [
        'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFGroupViTModel',
        'TFGroupViTPreTrainedModel',
        'TFGroupViTTextModel',
        'TFGroupViTVisionModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see real imports; at runtime the lazy module is used instead.
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ : List[Any] = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load a metaseq OPT checkpoint and normalize it for `OPTModel`.

    - unwraps a nested ``{"model": ...}`` state dict,
    - drops weights the HF model does not use,
    - renames projection/layer-norm keys to the HF layout,
    - splits each fused ``qkv_proj`` weight into separate q/k/v projections.

    Returns the normalized state dict.
    """
    sd = torch.load(checkpoint_path, map_location='cpu')
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location='cpu')['model']

    # pop unnecessary weights
    keys_to_delete = [
        'decoder.version',
        'decoder.output_projection.weight',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        'decoder.project_in_dim.weight': 'decoder.project_in.weight',
        'decoder.project_out_dim.weight': 'decoder.project_out.weight',
        'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('.qkv_proj.', '.q_proj.')
            k_name = key.replace('.qkv_proj.', '.k_proj.')
            v_name = key.replace('.qkv_proj.', '.v_proj.')

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Convert a metaseq OPT checkpoint into an HF `OPTModel` folder.

    checkpoint_path: path to the fairseq/metaseq checkpoint file.
    pytorch_dump_folder_path: output directory (created if missing).
    config: optional HF config name/path; defaults to a fresh `OPTConfig`.
    """
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    # Checkpoints are stored in half precision; keep eval mode for conversion.
    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse paths and run the conversion.
    # (Previously `parser`/`args` were never defined because the assignments
    # used mangled throwaway names -- fixed.)
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--fairseq_path',
        type=str,
        help=(
            'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
            ' https://huggingface.co/models?other=opt_metasq'
        ),
    )
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 527 | 0 |
from math import factorial
class Dual:
    """Dual number for forward-mode automatic differentiation.

    `real` is the value; `duals[i]` is the coefficient of the (i+1)-th dual unit.
    The class name is `Dual` because every method already constructed `Dual(...)`
    internally (the mangled class name made those calls NameErrors).
    """

    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            # An integer rank seeds that many dual units with coefficient 1.
            self.duals = [1] * rank
        else:
            # Otherwise `rank` is an explicit list of dual coefficients.
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        """Return a copy with trailing zero dual coefficients stripped."""
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            # Scalar addition only shifts the real part.
            return Dual(self.real + other, self.duals)

        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        # Pad the shorter coefficient list so the element-wise sum lines up.
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))

        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            # Scalar multiplication scales every coefficient.
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)

        # Full dual product: cross terms land at index i + j + 1.
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        # Division by another dual number is not supported.
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError('power must be a positive integer')
        if n == 0:
            return 1
        if n == 1:
            return self
        # Repeated multiplication keeps the dual coefficients exact.
        x = self
        for _ in range(n - 1):
            x *= self
        return x
def differentiate(func, position, order):
    """Return the `order`-th derivative of `func` at `position` via dual numbers.

    Raises ValueError for a non-callable func, non-numeric position, or
    non-int order. (Renamed from a mangled duplicate-parameter definition;
    the `__main__` block already called it as `differentiate`.)
    """
    if not callable(func):
        raise ValueError('differentiate() requires a function as input for func')
    if not isinstance(position, (float, int)):
        raise ValueError('differentiate() requires a float as input for position')
    if not isinstance(order, int):
        raise ValueError('differentiate() requires an int as input for order')
    # Seed a first-order dual at the evaluation point and push it through func.
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    # The n-th derivative is n! times the coefficient of the n-th dual unit.
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
    import doctest

    doctest.testmod()


def f(y):
    """Example curve y ** 6 (written as y**2 * y**4) used to demo differentiate()."""
    return y**2 * y**4


# Second derivative of y**6 at y = 9: 30 * 9**4 = 196830.
print(differentiate(f, 9, 2))
| 720 |
import math
class UpperCAmelCase__ :
"""simple docstring"""
def lowercase_ ( self : int , __lowerCamelCase : list[list[float]] , __lowerCamelCase : list[int] ) -> int:
SCREAMING_SNAKE_CASE__ = 0.0
SCREAMING_SNAKE_CASE__ = 0.0
for i in range(len(__lowerCamelCase ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
da += math.pow((sample[i] - weights[1][i]) , 2 )
return 0 if da > da else 1
return 0
def lowercase_ ( self : Optional[int] , __lowerCamelCase : list[list[int | float]] , __lowerCamelCase : list[int] , __lowerCamelCase : int , __lowerCamelCase : float ) -> list[list[int | float]]:
for i in range(len(__lowerCamelCase ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def main():
    """Train the toy SOM on four binary samples, then classify a test sample.

    (Renamed from a mangled name: the `__main__` guard calls `main()`.)
    """
    # training examples (m, n)
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f'''Clusters that the test sample belongs to : {winner}''')
    print(f'''Weights that have been trained : {weights}''')


# running the main() function
if __name__ == "__main__":
    main()
| 472 | 0 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
# BibTeX citation for the MATH dataset paper. All three module constants were
# previously bound to one throwaway name, leaving _DESCRIPTION and
# _KWARGS_DESCRIPTION undefined for the decorator below -- fixed.
_CITATION = '''\
@article{hendrycksmath2021,
    title={Measuring Mathematical Problem Solving With the MATH Dataset},
    author={Dan Hendrycks
    and Collin Burns
    and Saurav Kadavath
    and Akul Arora
    and Steven Basart
    and Eric Tang
    and Dawn Song
    and Jacob Steinhardt},
    journal={arXiv preprint arXiv:2103.03874},
    year={2021}
}
'''

# Short human-readable description of the metric.
_DESCRIPTION = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''

# Args/returns documentation appended to the metric docstring (raw string: keeps LaTeX backslashes).
_KWARGS_DESCRIPTION = R'''
Calculates accuracy after canonicalizing inputs.

Args:
    predictions: list of predictions to score. Each prediction
        is a string that contains natural language and LaTex.
    references: list of reference for each prediction. Each
        reference is a string that contains natural language
        and LaTex.
Returns:
    accuracy: accuracy after canonicalizing inputs
        (e.g., converting "1/2" to "\\frac{1}{2}")

Examples:
    >>> metric = datasets.load_metric("competition_math")
    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
    >>> print(results)
    {\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowercase(datasets.Metric):
    """Accuracy metric for the MATH dataset, canonicalizing LaTeX before comparing."""

    def _info(self):
        """Describe the metric's inputs and metadata (required by `datasets.Metric`)."""
        # Method names restored to the `_info`/`_compute` hook names the
        # `datasets.Metric` base class calls; the mangled version defined the
        # same name twice with duplicate parameter names (a SyntaxError).
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string'),
                    'references': datasets.Value('string'),
                }
            ),
            homepage='https://github.com/hendrycks/math',
            codebase_urls=['https://github.com/hendrycks/math'],
        )

    def _compute(self, predictions, references):
        """Return {'accuracy': fraction of predictions equivalent to their reference}."""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            # is_equiv canonicalizes both strings (e.g. "1/2" vs "\\frac{1}{2}").
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 146 |
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Return real (active) power P = S * pf.

    Renamed: this function and the one below shared a mangled name, so the
    second definition silently shadowed this one.
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * power_factor
def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Return reactive power Q = S * sqrt(1 - pf**2)."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest
    doctest.testmod()
| 146 | 1 |
import math
from datetime import datetime, timedelta
def _lowercase ( UpperCamelCase_ ) -> datetime:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = year % 19
SCREAMING_SNAKE_CASE__ = year % 4
SCREAMING_SNAKE_CASE__ = year % 7
SCREAMING_SNAKE_CASE__ = math.floor(year / 100 )
SCREAMING_SNAKE_CASE__ = math.floor((13 + 8 * leap_day_inhibits) / 25 )
SCREAMING_SNAKE_CASE__ = leap_day_inhibits / 4
SCREAMING_SNAKE_CASE__ = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
SCREAMING_SNAKE_CASE__ = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
SCREAMING_SNAKE_CASE__ = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
SCREAMING_SNAKE_CASE__ = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(__UpperCAmelCase , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(__UpperCAmelCase , 4 , 18 )
else:
return datetime(__UpperCAmelCase , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
    # Print the Easter date for a few sample years, phrased in past or future tense.
    # (`tense` was previously assigned to a mangled name, leaving it undefined.)
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"""Easter in {year} {tense} {gauss_easter(year)}""")
| 713 |
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Left-shift `number` by `shift_amount` bits, as a '0b'-prefixed string.

    Zeros are appended on the right, e.g. (5, 2) -> '0b10100'.
    Renamed: all three shift functions shared one mangled name, so the later
    definitions shadowed the earlier ones.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be positive integers')

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift(number: int, shift_amount: int) -> str:
    """Logically right-shift `number` by `shift_amount` bits ('0b'-prefixed string).

    Shifting out every bit yields '0b0'.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be positive integers')

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Arithmetic (sign-extending) right shift, returned as a '0b'-prefixed string.

    Negative numbers are represented in two's complement with an explicit sign
    bit; the sign bit is replicated into the vacated positions.
    """
    if number >= 0:  # Get binary representation of positive number
        binary_number = '0' + str(bin(number)).strip('-')[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            '1' + '0' * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        # Everything shifted out: result is all sign bits.
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest
    doctest.testmod()
| 400 | 0 |
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """Sort `array` in place with pigeonhole sort and return it.

    O(n + range) time; suitable when the value range is close to n.
    The mangled version collapsed the tuple unpacks and never wrote the
    sorted values back into `array`, returning the input unsorted -- fixed.
    """
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting: tally each value into its hole.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Interactive demo. Previously `user_input` was undefined (assigned to a
    # mangled name) and input() ran at import time; both fixed.
    user_input = input('Enter numbers separated by comma:\n')
    unsorted = [int(x) for x in user_input.split(',')]
    print(pigeon_sort(unsorted))
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')


@require_sentencepiece
@require_tokenizers
class SpeechTaTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for the char-level SentencePiece SpeechT5 tokenizer.

    Restored from mangled names: the base class was an undefined
    `UpperCamelCase` (must be TokenizerTesterMixin), the fixture constant's
    double-underscore name triggered class-level name mangling, and every
    test method shared one name so unittest discovered none of them.
    """

    # Attributes read by TokenizerTesterMixin.
    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self) -> None:
        """Build a tokenizer from the fixture, add <mask>/<ctc_blank>, save to tmpdir."""
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken('<mask>', lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'mask_token': mask_token})
        tokenizer.add_tokens(['<ctc_blank>'])

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        # Char-level tokenization round-trips text unchanged.
        input_text = 'this is a test'
        output_text = 'this is a test'
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        """Encode/decode helper used by the common mixin tests."""
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_convert_token_and_id(self):
        """`<pad>` maps to id 1 and back."""
        token = '<pad>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-4], 'œ')
        self.assertEqual(vocab_keys[-2], '<mask>')
        self.assertEqual(vocab_keys[-1], '<ctc_blank>')
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)

    def test_add_tokens_tokenizer(self):
        """Adding regular and special tokens grows len(tokenizer) but not vocab_size."""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ['aaaaa bbbbbb', 'cccccccccdddddddd']
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l', add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    '>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l', add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    def test_pickle_subword_regularization_tokenizer(self):
        # Subword regularization does not apply to this char-level model.
        pass

    def test_subword_regularization_tokenizer(self):
        pass

    def test_full_tokenizer(self):
        """Tokenize, map to ids, and map ids back (unknown '92000' becomes <unk>)."""
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize('This is a test')
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6])

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
@slow
def UpperCAmelCase__ ( self : str ) -> Tuple:
# Use custom sequence because this tokenizer does not handle numbers.
lowerCAmelCase :Any = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
lowerCAmelCase :List[str] = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=UpperCAmelCase , ) | 553 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class _lowerCamelCase(PretrainedConfig):
    """Configuration for a CvT (Convolutional vision Transformer) model.

    Base class restored to `PretrainedConfig` (the mangled base `_lowercase`
    was undefined), parameter names restored (the mangled signature repeated
    `__a`, a SyntaxError), and the `model_type` class attribute — required by
    the HF config machinery — restored from a mangled name. Per-stage options
    are 3-element lists (one entry per CvT stage). List defaults are kept for
    interface compatibility with the upstream config.
    """

    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 721 |
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
lowerCAmelCase__ = HUGGINGFACE_HUB_CACHE
lowerCAmelCase__ = '''config.json'''
lowerCAmelCase__ = '''diffusion_pytorch_model.bin'''
lowerCAmelCase__ = '''diffusion_flax_model.msgpack'''
lowerCAmelCase__ = '''model.onnx'''
lowerCAmelCase__ = '''diffusion_pytorch_model.safetensors'''
lowerCAmelCase__ = '''weights.pb'''
lowerCAmelCase__ = '''https://huggingface.co'''
lowerCAmelCase__ = default_cache_path
lowerCAmelCase__ = '''diffusers_modules'''
lowerCAmelCase__ = os.getenv('''HF_MODULES_CACHE''', os.path.join(hf_cache_home, '''modules'''))
lowerCAmelCase__ = ['''fp16''', '''non-ema''']
lowerCAmelCase__ = '''.self_attn'''
| 544 | 0 |
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
# FIX: the TypeVar must be bound to the name `T` — the class below uses
# `Generic[T]`, which was a NameError with the obfuscated binding.
T = TypeVar("T")

# Backward-compatible alias for the original auto-generated binding name.
__UpperCamelCase = T


class GraphAdjacencyList(Generic[T]):
    """Directed or undirected graph stored as an adjacency list.

    `adj_list` maps each vertex to the list of its adjacent vertices.
    """

    def __init__(self, directed: bool = True) -> None:
        # FIX: the attribute assignments were destroyed by obfuscation
        # (`snake_case__ = {}` instead of `self.adj_list = {}`).
        self.adj_list: dict[T, list[T]] = {}
        self.directed = directed

    def lowerCAmelCase_(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        """Add an edge between the two vertices, creating unseen vertices.

        For undirected graphs the edge is recorded in both adjacency lists;
        for directed graphs only under the source vertex. Returns ``self``
        so calls can be chained.
        """
        if not self.directed:  # For undirected graphs
            # Both vertices already known: record the edge in both lists.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # Only the source is known: append the destination to it, then
            # create the destination vertex with the source as first neighbour.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # Only the destination is known: symmetric case.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # Neither vertex is known yet: create both.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # Both vertices known: append the destination to the source list.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # Only the source known: append, and create an empty destination.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # Only the destination known: create the source with one neighbour.
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # Neither known: create the source with one neighbour and an
            # empty destination.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)


# Backward-compatible alias for the original obfuscated class name.
__SCREAMING_SNAKE_CASE = GraphAdjacencyList
| 328 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    """Decoding strategies supported by the MGP-STR processor.

    The class and member names are restored: the module-level tuple below and
    the processor's `_decode_helper` reference `DecodeType.CHARACTER` /
    `DecodeType.BPE` / `DecodeType.WORDPIECE`, which the clobbered
    auto-generated names could never satisfy. The base is `ExplicitEnum`
    (imported at the top of this module), whose string values compare equal
    to plain strings like "char".
    """

    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


# Tuple of all supported decode types (module-level constant name kept as-is).
__UpperCamelCase = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class __SCREAMING_SNAKE_CASE(ProcessorMixin):
    """MGP-STR processor: wraps a ViT image processor and three tokenizers
    (character, BPE, WordPiece) and fuses their decodings.

    FIXES vs. the previous version: every method repeated a single parameter
    name (a SyntaxError), all five methods shared one clobbered name while the
    bodies call `self._decode_helper` / `self.char_decode` / `self.bpe_decode`
    / `self.wp_decode`, and literal arguments (e.g. `FutureWarning`,
    `largest=True`) had been replaced by parameter names. The base class is
    `ProcessorMixin`, imported at the top of this module.
    """

    # Attribute names required by ProcessorMixin (restored from clobbering).
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # `feature_extractor` is only a deprecated fallback for `image_processor`.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        # NOTE(review): these two downloads happen at construction time.
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Forward `images` to the image processor and `text` to the
        character tokenizer; when both are given, attach the token ids as
        `labels` on the image inputs."""
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        """Decode `(char_logits, bpe_logits, wp_logits)` and, per sample,
        keep the decoding head with the highest confidence score."""
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        """Greedy-decode one head's logits into strings plus confidence
        scores (product of per-step max probabilities up to EOS)."""
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(F'''Format {format} is not supported.''')

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        # Drop the first (BOS) position before decoding.
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        """Character-tokenizer decode with intra-token spaces removed."""
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        """Plain BPE (GPT-2 tokenizer) batch decode."""
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        """WordPiece (BERT tokenizer) decode with spaces removed."""
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
| 328 | 1 |
# Project Euler 145: count "reversible" numbers n (no leading/trailing zero)
# such that every digit of n + reverse(n) is odd.
# FIX: the two digit lists were both bound to one obfuscated name while the
# function body references EVEN_DIGITS/ODD_DIGITS; both functions shared one
# name while the call sites use `reversible_numbers` and `solution`; and the
# `digits[...] = ...` index assignments had been destroyed.
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list, length: int) -> int:
    """Count reversible numbers of digit-length `length`.

    Digit pairs are fixed from the least-significant side inward;
    `remainder` carries the running digit-sum of the pairs fixed so far.
    """
    if remaining_length == 0:
        # All digits chosen: reject leading/trailing zero, then verify that
        # every digit of n + reverse(n) is odd while propagating the carry.
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        # Odd length: the middle digit is doubled in the sum, so the carry
        # into it must already be odd for the result digit to be odd.
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        # The two digits of a pair must have opposite parity for their sum
        # (plus the incoming carry) to be odd.
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """Return how many reversible numbers exist below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f'{solution() = }')
| 148 |
def cramers_rule_2x2(equation1, equation2):
    """Solve a 2x2 linear system with Cramer's rule.

    Each equation is a 3-element sequence ``[a, b, c]`` meaning
    ``a*x + b*y = c``. Returns ``(x, y)``.

    Raises ValueError for malformed input, for two equations whose x/y
    coefficients are all zero, and for inconsistent or infinitely
    solvable systems.

    FIX: the previous signature declared the same parameter name twice
    (a SyntaxError) and unpacked both equations from the same variable.
    """
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)


# Backward-compatible alias for the original auto-generated name.
__a = cramers_rule_2x2
| 148 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger. NOTE(review): this binding is immediately clobbered by
# the assignment below because the obfuscation gave both the same name —
# the logger was presumably meant to be bound to `logger`; verify.
snake_case : Optional[Any] = logging.get_logger(__name__)
# Map of pretrained Swin V2 checkpoints to their hosted config.json URLs.
snake_case : Optional[Any] = {
    '''microsoft/swinv2-tiny-patch4-window8-256''': (
        '''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
    ),
}
class _snake_case(PretrainedConfig):
    """Configuration for a Swin Transformer V2 model.

    FIXES vs. the previous version: the class inherited from itself (a
    NameError — the intended base `PretrainedConfig` is imported at the top
    of this module), the `model_type`/`attribute_map` class attributes were
    clobbered into one name, and the `__init__` signature repeated a single
    parameter name (a SyntaxError).
    """

    model_type = "swinv2"
    # Maps the generic PretrainedConfig attribute names onto this config's
    # per-model field names.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 445 |
# Sample adjacency-list graphs for manual testing of the SCC routines below.
# NOTE(review): both constants share one auto-generated name, so the first
# (acyclic) graph is immediately clobbered by the second (cyclic) one —
# they were presumably two distinctly named test graphs; verify.
snake_case : Any = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
snake_case : Any = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict, vert: int, visited: list) -> list:
    """Return vertices reachable from `vert` in post-order (finish order).

    FIX: all three functions here shared one obfuscated name while the
    bodies call `topology_sort` / `find_components`; the `visited[...]`
    assignments had also been destroyed.
    """
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict, vert: int, visited: list) -> list:
    """Collect every vertex reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict) -> list:
    """Kosaraju's algorithm: return the strongly connected components.

    Vertices must be the integers 0..len(graph)-1. First pass computes a
    finish-time order on the original graph; second pass collects
    components from the reversed graph in reverse finish order.
    """
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}

    # Build the transpose graph (every edge reversed).
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
| 445 | 1 |
'''simple docstring'''
from sklearn.metrics import fa_score
import datasets
lowercase = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
lowercase = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. 
This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, 
references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
lowercase = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class A(datasets.Metric):
    """F1 metric backed by scikit-learn's f1 score.

    FIXES vs. the previous version: both methods shared one obfuscated name
    (so the first clobbered the second), while `datasets.Metric` dispatches
    to the hook names `_info` and `_compute`; `_compute` also repeated a
    single parameter name (a SyntaxError) and referenced an undefined `_a`.

    NOTE(review): `_DESCRIPTION`/`_KWARGS_DESCRIPTION`/`_CITATION` are
    referenced here but the module's doc-string constants appear to be bound
    to a different (obfuscated) name — verify those bindings.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        # `fa_score` is this module's imported name for sklearn's f1 scorer.
        score = fa_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        # sklearn returns a scalar for averaged modes and an array for
        # `average=None`; normalise the scalar case to a plain float.
        return {"f1": float(score) if score.size == 1 else score}
| 721 |
'''simple docstring'''
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Read `file_path` and return its contents as a string of '0'/'1' bits.

    Exits the process with a message if the file cannot be opened.
    (Name restored: the caller `compress` invokes `read_file_binary`, but
    all functions in this module had been renamed to one clobbering name.)
    """
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        # f"{byte:08b}" zero-pads every byte to exactly 8 bits.
        return "".join(f"{dat:08b}" for dat in data)
    except OSError:
        print("File not accessible")
        sys.exit()
def decompress_data(data_bits: str) -> str:
    """Decompress an LZ-coded bit string back to the original bit string.

    The lexicon starts with the two one-bit codes. Each time the entry count
    crosses a power of two, every existing key gains a leading '0' so that
    code width grows with the dictionary — mirroring how the compressor
    emitted codes. (Function name and the dictionary-index assignments are
    restored; the obfuscation had collapsed them into bare rebinds.)
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)  # next code number to assign (starts at 2)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        if math.log2(index).is_integer():
            # Dictionary size reached a power of two: widen every code.
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
return result
def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the bit string `to_write` to `file_path` as raw bytes.

    The bit string is chunked into 8-bit groups; an end marker / padding
    ("1" followed by zeros) is appended so the stream stays byte-aligned.
    Exits the process with a message if the file cannot be opened.

    NOTE(review): only `result_byte_array[:-1]` is written, i.e. the final
    (marker) chunk is dropped — this matches the original code; confirm it
    is the intended framing for the matching compressor.
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
sys.exit()
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : Optional[int] = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
A : Union[str, Any] = data_bits[counter:]
A : Tuple = data_bits[counter + 1 :]
return data_bits
def compress(source_path: str, destination_path: str) -> None:
    """Decompress the LZ file at `source_path` into `destination_path`.

    Pipeline: read raw bits -> strip the length prefix -> LZ-decode ->
    write the decoded bits back out as bytes. (The helper names are the
    ones these call sites already used; the five definitions had all been
    renamed to one clobbering identifier.)
    """
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
| 343 | 0 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
# Fail fast if the installed transformers is older than this example requires.
check_min_version("4.17.0.dev0")
# The tabfact preprocessing below relies on datasets>=1.8.0 features.
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
# Module-level logger (bound to an auto-generated name by the refactoring tool).
A_ : Tuple = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model
    for training and eval.

    FIXES vs. the previous version: every field was declared under one
    obfuscated name (so only the last survived) while `__post_init__` and
    `main()` reference `dataset_name`, `train_file`, etc.; the boolean /
    optional defaults were bound to an undefined name; and the validator
    method must be named `__post_init__` for dataclasses to run it.
    """

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        # Either a hub dataset name or explicit train/validation files must
        # be supplied, and the two files must share a csv/json extension.
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to
    fine-tune from.

    FIXES vs. the previous version: all fields shared a single obfuscated
    name (so only one field survived) while `main()` accesses
    `model_args.model_name_or_path`, `cache_dir`, etc., and the defaults
    were bound to an undefined name. Defaults below follow the standard
    transformers example scripts — TODO confirm `use_fast_tokenizer=True`
    against the upstream script.
    """

    model_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def UpperCamelCase__ ( ) -> int:
'''simple docstring'''
snake_case__ : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case__ , snake_case__ , snake_case__ : Any = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case__ , snake_case__ , snake_case__ : Optional[Any] = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
snake_case__ : str = training_args.get_process_log_level()
logger.setLevel(__magic_name__ )
datasets.utils.logging.set_verbosity(__magic_name__ )
transformers.utils.logging.set_verbosity(__magic_name__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
snake_case__ : Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case__ : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
snake_case__ : Tuple = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
snake_case__ : List[str] = {"""train""": data_args.train_file, """validation""": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
snake_case__ : int = data_args.train_file.split(""".""" )[-1]
snake_case__ : int = data_args.test_file.split(""".""" )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
snake_case__ : List[str] = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )
for key in data_files.keys():
logger.info(f"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith(""".csv""" ):
# Loading a dataset from local csv files
snake_case__ : int = load_dataset("""csv""" , data_files=__magic_name__ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
snake_case__ : Tuple = load_dataset("""json""" , data_files=__magic_name__ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
snake_case__ : Dict = raw_datasets["""train"""].features["""label"""].names
snake_case__ : Optional[Any] = len(__magic_name__ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case__ : int = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__magic_name__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
snake_case__ : Dict = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=__magic_name__ , )
snake_case__ : Tuple = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__magic_name__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
snake_case__ : Dict = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
snake_case__ : Dict = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
snake_case__ : Union[str, Any] = {"""Refused""": 0, """Entailed""": 1}
snake_case__ : List[str] = {0: """Refused""", 1: """Entailed"""}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
snake_case__ : int = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(__magic_name__ : Union[str, Any] ):
# Tokenize the texts
def _convert_table_text_to_pandas(__magic_name__ : int ):
snake_case__ : List[Any] = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )]
snake_case__ : Optional[Any] = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
snake_case__ : Optional[int] = examples["""statement"""]
snake_case__ : Dict = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) )
snake_case__ : Optional[Any] = tokenizer(__magic_name__ , __magic_name__ , padding=__magic_name__ , max_length=__magic_name__ , truncation=__magic_name__ )
snake_case__ : Tuple = examples["""label"""]
return result
with training_args.main_process_first(desc="""dataset map pre-processing""" ):
snake_case__ : List[str] = raw_datasets.map(
__magic_name__ , batched=__magic_name__ , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
snake_case__ : Optional[int] = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
snake_case__ : List[Any] = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
snake_case__ : int = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
snake_case__ : Any = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("""--do_predict requires a test dataset""" )
snake_case__ : List[Any] = raw_datasets["""test"""]
if data_args.max_predict_samples is not None:
snake_case__ : Dict = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(__magic_name__ ) ) , 3 ):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__magic_name__ : EvalPrediction ):
snake_case__ : Union[str, Any] = p.predictions[0] if isinstance(p.predictions , __magic_name__ ) else p.predictions
snake_case__ : Union[str, Any] = np.argmax(__magic_name__ , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
snake_case__ : Union[str, Any] = default_data_collator
elif training_args.fpaa:
snake_case__ : Optional[int] = DataCollatorWithPadding(__magic_name__ , pad_to_multiple_of=8 )
else:
snake_case__ : str = None
# Initialize our Trainer
snake_case__ : str = Trainer(
model=__magic_name__ , args=__magic_name__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__magic_name__ , tokenizer=__magic_name__ , data_collator=__magic_name__ , )
# Training
if training_args.do_train:
snake_case__ : Optional[Any] = None
if training_args.resume_from_checkpoint is not None:
snake_case__ : Tuple = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case__ : Optional[int] = last_checkpoint
snake_case__ : Any = trainer.train(resume_from_checkpoint=__magic_name__ )
snake_case__ : str = train_result.metrics
snake_case__ : Dict = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__magic_name__ )
)
snake_case__ : Any = min(__magic_name__ , len(__magic_name__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" , __magic_name__ )
trainer.save_metrics("""train""" , __magic_name__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
snake_case__ : Optional[Any] = trainer.evaluate(eval_dataset=__magic_name__ )
snake_case__ : Union[str, Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__magic_name__ )
snake_case__ : List[str] = min(__magic_name__ , len(__magic_name__ ) )
trainer.log_metrics("""eval""" , __magic_name__ )
trainer.save_metrics("""eval""" , __magic_name__ )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
snake_case__ : Dict = predict_dataset.remove_columns("""label""" )
snake_case__ : str = trainer.predict(__magic_name__ , metric_key_prefix="""predict""" ).predictions
snake_case__ : str = np.argmax(__magic_name__ , axis=1 )
snake_case__ : Tuple = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" )
if trainer.is_world_process_zero():
with open(__magic_name__ , """w""" ) as writer:
logger.info("""***** Predict Results *****""" )
writer.write("""index\tprediction\n""" )
for index, item in enumerate(__magic_name__ ):
snake_case__ : List[str] = label_list[item]
writer.write(f"{index}\t{item}\n" )
snake_case__ : Union[str, Any] = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
if training_args.push_to_hub:
trainer.push_to_hub(**__magic_name__ )
else:
trainer.create_model_card(**__magic_name__ )
def UpperCamelCase__ ( __magic_name__ : str ) -> List[str]:
    """Spawn-style entry point that ignores its single argument and runs ``main()``.

    NOTE(review): presumably the ``_mp_fn(index)`` hook expected by TPU /
    ``xla_spawn`` launchers, which call the hook with a process index that is
    intentionally unused — TODO confirm against the launcher that imports this.
    """
    main()
if __name__ == "__main__":
    main()
| 38 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
# NOTE(review): the base-class name `__SCREAMING_SNAKE_CASE` is not defined at
# module scope at this point — presumably it should be `BaseOutput` (imported
# above); as written, defining this class raises NameError. Verify upstream.
class __snake_case ( __SCREAMING_SNAKE_CASE ):
    """Output container for the decoder.

    NOTE(review): the single field's type annotation appears to have been
    mangled into the bare literal ``42``; presumably it was a tensor-typed
    ``sample`` field — TODO confirm against the original source.
    """
    lowerCamelCase__ = 42
class __snake_case ( nn.Module ):
    """Convolutional encoder: conv_in -> stack of down blocks -> mid block -> norm/act/conv_out.

    NOTE(review): this file is machine-mangled. Every ``self.<attr> = ...`` in
    ``__init__`` has been collapsed to a rebinding of the local ``snake_case__``,
    so attributes read later (``self.layers_per_block``, ``self.conv_in``,
    ``self.down_blocks``, ...) are never actually set, and ``torch.nn.Convad``
    is not a real torch name (presumably ``Conv2d``). Compare against the
    upstream diffusers ``Encoder`` before trusting any behavior here.
    """
    def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=("DownEncoderBlock2D",) , __SCREAMING_SNAKE_CASE=(6_4,) , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE="silu" , __SCREAMING_SNAKE_CASE=True , ):
        super().__init__()
        snake_case__ : str = layers_per_block
        # input projection to the first block's channel count
        snake_case__ : int = torch.nn.Convad(
            __SCREAMING_SNAKE_CASE , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
        snake_case__ : List[Any] = None
        snake_case__ : List[Any] = nn.ModuleList([] )
        # down
        snake_case__ : Union[str, Any] = block_out_channels[0]
        for i, down_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
            snake_case__ : Optional[Any] = output_channel
            snake_case__ : Union[str, Any] = block_out_channels[i]
            # the last down block gets no downsampler
            snake_case__ : int = i == len(__SCREAMING_SNAKE_CASE ) - 1
            snake_case__ : str = get_down_block(
                __SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block , in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__SCREAMING_SNAKE_CASE , resnet_groups=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
            self.down_blocks.append(__SCREAMING_SNAKE_CASE )
        # mid
        snake_case__ : Optional[Any] = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
        # out
        snake_case__ : Tuple = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__SCREAMING_SNAKE_CASE , eps=1e-6 )
        snake_case__ : Tuple = nn.SiLU()
        # double the output channels when predicting a diagonal Gaussian (mean + logvar)
        snake_case__ : str = 2 * out_channels if double_z else out_channels
        snake_case__ : int = nn.Convad(block_out_channels[-1] , __SCREAMING_SNAKE_CASE , 3 , padding=1 )
        snake_case__ : Union[str, Any] = False
    def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
        """Forward pass; uses gradient checkpointing in training mode when enabled."""
        snake_case__ : Optional[Any] = x
        snake_case__ : int = self.conv_in(__SCREAMING_SNAKE_CASE )
        if self.training and self.gradient_checkpointing:
            # wrap each module so torch.utils.checkpoint can re-run it in backward
            def create_custom_forward(__SCREAMING_SNAKE_CASE ):
                def custom_forward(*__SCREAMING_SNAKE_CASE ):
                    return module(*__SCREAMING_SNAKE_CASE )
                return custom_forward
            # down
            if is_torch_version(""">=""" , """1.11.0""" ):
                # torch >= 1.11 supports the use_reentrant flag
                for down_block in self.down_blocks:
                    snake_case__ : List[Any] = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
                # middle
                snake_case__ : List[Any] = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
            else:
                for down_block in self.down_blocks:
                    snake_case__ : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
                # middle
                snake_case__ : str = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE )
        else:
            # down
            for down_block in self.down_blocks:
                snake_case__ : List[str] = down_block(__SCREAMING_SNAKE_CASE )
            # middle
            snake_case__ : str = self.mid_block(__SCREAMING_SNAKE_CASE )
        # post-process
        snake_case__ : Any = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
        snake_case__ : List[str] = self.conv_act(__SCREAMING_SNAKE_CASE )
        snake_case__ : str = self.conv_out(__SCREAMING_SNAKE_CASE )
        return sample
class __snake_case ( nn.Module ):
    """Convolutional decoder: conv_in -> mid block -> stack of up blocks -> norm/act/conv_out.

    Mirrors the encoder above; supports a "spatial" norm type that conditions the
    output norm on latent embeddings.

    NOTE(review): same mangling caveats as the encoder class — ``self.<attr>``
    assignments collapsed into local rebindings and ``nn.Convad`` presumably
    means ``Conv2d``; verify against upstream before relying on names.
    """
    def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=("UpDecoderBlock2D",) , __SCREAMING_SNAKE_CASE=(6_4,) , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE="silu" , __SCREAMING_SNAKE_CASE="group" , ):
        super().__init__()
        snake_case__ : Any = layers_per_block
        # latent -> deepest channel count
        snake_case__ : Optional[Any] = nn.Convad(
            __SCREAMING_SNAKE_CASE , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
        snake_case__ : Union[str, Any] = None
        snake_case__ : Dict = nn.ModuleList([] )
        # with spatial norm the time-embedding channel count is tied to in_channels
        snake_case__ : Optional[int] = in_channels if norm_type == """spatial""" else None
        # mid
        snake_case__ : Tuple = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
        # up
        snake_case__ : List[Any] = list(reversed(__SCREAMING_SNAKE_CASE ) )
        snake_case__ : Optional[Any] = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
            snake_case__ : List[Any] = output_channel
            snake_case__ : Optional[Any] = reversed_block_out_channels[i]
            # the last up block gets no upsampler
            snake_case__ : List[str] = i == len(__SCREAMING_SNAKE_CASE ) - 1
            snake_case__ : int = get_up_block(
                __SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , prev_output_channel=__SCREAMING_SNAKE_CASE , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , resnet_groups=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , resnet_time_scale_shift=__SCREAMING_SNAKE_CASE , )
            self.up_blocks.append(__SCREAMING_SNAKE_CASE )
            snake_case__ : int = output_channel
        # out
        if norm_type == "spatial":
            snake_case__ : List[Any] = SpatialNorm(block_out_channels[0] , __SCREAMING_SNAKE_CASE )
        else:
            snake_case__ : Any = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__SCREAMING_SNAKE_CASE , eps=1e-6 )
        snake_case__ : Tuple = nn.SiLU()
        snake_case__ : Union[str, Any] = nn.Convad(block_out_channels[0] , __SCREAMING_SNAKE_CASE , 3 , padding=1 )
        snake_case__ : int = False
    def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
        """Forward pass; optional ``latent_embeds`` feed the spatial output norm."""
        snake_case__ : Union[str, Any] = z
        snake_case__ : Any = self.conv_in(__SCREAMING_SNAKE_CASE )
        # dtype of the up-block parameters, used to cast the sample back
        snake_case__ : Optional[Any] = next(iter(self.up_blocks.parameters() ) ).dtype
        if self.training and self.gradient_checkpointing:
            # wrap each module so torch.utils.checkpoint can re-run it in backward
            def create_custom_forward(__SCREAMING_SNAKE_CASE ):
                def custom_forward(*__SCREAMING_SNAKE_CASE ):
                    return module(*__SCREAMING_SNAKE_CASE )
                return custom_forward
            if is_torch_version(""">=""" , """1.11.0""" ):
                # middle
                snake_case__ : int = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
                snake_case__ : int = sample.to(__SCREAMING_SNAKE_CASE )
                # up
                for up_block in self.up_blocks:
                    snake_case__ : List[str] = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
            else:
                # middle
                snake_case__ : Dict = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                snake_case__ : Optional[Any] = sample.to(__SCREAMING_SNAKE_CASE )
                # up
                for up_block in self.up_blocks:
                    snake_case__ : str = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        else:
            # middle
            snake_case__ : List[Any] = self.mid_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            snake_case__ : List[Any] = sample.to(__SCREAMING_SNAKE_CASE )
            # up
            for up_block in self.up_blocks:
                snake_case__ : Dict = up_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        # post-process
        if latent_embeds is None:
            snake_case__ : Optional[Any] = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
        else:
            # spatial norm takes the latent embeddings as conditioning
            snake_case__ : str = self.conv_norm_out(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        snake_case__ : Any = self.conv_act(__SCREAMING_SNAKE_CASE )
        snake_case__ : Optional[Any] = self.conv_out(__SCREAMING_SNAKE_CASE )
        return sample
class __snake_case ( nn.Module ):
    """VQ-VAE vector quantizer: maps continuous latents to their nearest codebook
    entries, with optional index remapping and a straight-through gradient.

    NOTE(review): same mangling caveats as the classes above — ``self.<attr>``
    assignments collapsed into local rebindings — so attributes read later
    (``self.n_e``, ``self.remap``, ``self.embedding``, ...) are never actually
    set here. Compare against the upstream diffusers ``VectorQuantizer``.
    """
    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="random" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True ):
        super().__init__()
        snake_case__ : int = n_e
        snake_case__ : Optional[int] = vq_embed_dim
        # commitment-loss weight
        snake_case__ : int = beta
        snake_case__ : Optional[int] = legacy
        # codebook, initialised uniformly in [-1/n_e, 1/n_e]
        snake_case__ : Dict = nn.Embedding(self.n_e , self.vq_embed_dim )
        self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
        snake_case__ : List[str] = remap
        if self.remap is not None:
            # remap is a path to a .npy file listing the indices actually used
            self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
            snake_case__ : Optional[Any] = self.used.shape[0]
            snake_case__ : List[str] = unknown_index # "random" or "extra" or integer
            if self.unknown_index == "extra":
                # reserve one extra slot for unknown indices
                snake_case__ : Dict = self.re_embed
                snake_case__ : List[str] = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices." )
        else:
            snake_case__ : Union[str, Any] = n_e
        snake_case__ : str = sane_index_shape
    def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
        """Remap raw codebook indices into the reduced 'used' index space."""
        snake_case__ : Any = inds.shape
        assert len(__SCREAMING_SNAKE_CASE ) > 1
        snake_case__ : Dict = inds.reshape(ishape[0] , -1 )
        snake_case__ : Any = self.used.to(__SCREAMING_SNAKE_CASE )
        # position of each index in the 'used' table (match.argmax over equality)
        snake_case__ : Dict = (inds[:, :, None] == used[None, None, ...]).long()
        snake_case__ : List[Any] = match.argmax(-1 )
        # mask of indices not present in the table
        snake_case__ : List[str] = match.sum(2 ) < 1
        if self.unknown_index == "random":
            snake_case__ : List[str] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
        else:
            snake_case__ : Optional[Any] = self.unknown_index
        return new.reshape(__SCREAMING_SNAKE_CASE )
    def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
        """Inverse of the remapping: reduced indices back to full codebook indices."""
        snake_case__ : List[Any] = inds.shape
        assert len(__SCREAMING_SNAKE_CASE ) > 1
        snake_case__ : int = inds.reshape(ishape[0] , -1 )
        snake_case__ : Optional[int] = self.used.to(__SCREAMING_SNAKE_CASE )
        if self.re_embed > self.used.shape[0]: # extra token
            snake_case__ : List[Any] = 0 # simply set to zero
        snake_case__ : Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __SCREAMING_SNAKE_CASE )
        return back.reshape(__SCREAMING_SNAKE_CASE )
    def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
        """Quantize ``z``; returns (quantized, loss, (perplexity, encodings, indices))."""
        # reshape z -> (batch, height, width, channel) and flatten
        snake_case__ : Any = z.permute(0 , 2 , 3 , 1 ).contiguous()
        snake_case__ : Optional[Any] = z.view(-1 , self.vq_embed_dim )
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        snake_case__ : Dict = torch.argmin(torch.cdist(__SCREAMING_SNAKE_CASE , self.embedding.weight ) , dim=1 )
        snake_case__ : Union[str, Any] = self.embedding(__SCREAMING_SNAKE_CASE ).view(z.shape )
        # perplexity / one-hot encodings are not computed here
        snake_case__ : List[str] = None
        snake_case__ : Union[str, Any] = None
        # compute loss for embedding
        if not self.legacy:
            snake_case__ : Tuple = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
        else:
            snake_case__ : List[Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
        # preserve gradients (straight-through estimator)
        snake_case__ : Any = z + (z_q - z).detach()
        # reshape back to match original input shape
        snake_case__ : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        if self.remap is not None:
            snake_case__ : List[Any] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
            snake_case__ : str = self.remap_to_used(__SCREAMING_SNAKE_CASE )
            snake_case__ : str = min_encoding_indices.reshape(-1 , 1 ) # flatten
        if self.sane_index_shape:
            snake_case__ : Tuple = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
    def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
        """Look up codebook vectors for given indices and reshape to ``shape``."""
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            snake_case__ : List[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
            snake_case__ : Optional[int] = self.unmap_to_all(__SCREAMING_SNAKE_CASE )
            snake_case__ : Optional[Any] = indices.reshape(-1 ) # flatten again
        # get quantized latent vectors
        snake_case__ : int = self.embedding(__SCREAMING_SNAKE_CASE )
        if shape is not None:
            snake_case__ : str = z_q.view(__SCREAMING_SNAKE_CASE )
            # reshape back to match original input shape
            snake_case__ : str = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        return z_q
class __snake_case ( __SCREAMING_SNAKE_CASE ):
    """Diagonal Gaussian over latents, parameterised by concatenated mean/logvar.

    NOTE(review): the base-class name `__SCREAMING_SNAKE_CASE` is undefined at
    module scope (upstream this class subclasses ``object``), and the chunk
    line below uses an annotated tuple target (``a , b : Any = ...``) which is
    a SyntaxError in Python — this mangled file cannot import as-is. Same
    self-assignment-collapse caveat as the classes above.
    """
    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ):
        snake_case__ : Tuple = parameters
        # split the channel dimension into mean and log-variance halves
        snake_case__ , snake_case__ : Any = torch.chunk(__SCREAMING_SNAKE_CASE , 2 , dim=1 )
        # clamp logvar for numerical stability
        snake_case__ : Union[str, Any] = torch.clamp(self.logvar , -30.0 , 20.0 )
        snake_case__ : Optional[int] = deterministic
        snake_case__ : Optional[int] = torch.exp(0.5 * self.logvar )
        snake_case__ : Any = torch.exp(self.logvar )
        if self.deterministic:
            # deterministic mode: zero std so sampling returns the mean
            snake_case__ : List[str] = torch.zeros_like(
                self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
    def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE = None ):
        """Draw a reparameterised sample: mean + std * eps."""
        # make sure sample is on the same device as the parameters and has same dtype
        snake_case__ : Dict = randn_tensor(
            self.mean.shape , generator=__SCREAMING_SNAKE_CASE , device=self.parameters.device , dtype=self.parameters.dtype )
        snake_case__ : Optional[int] = self.mean + self.std * sample
        return x
    def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=None ):
        """KL divergence to a standard normal, or to ``other`` when given."""
        if self.deterministic:
            return torch.Tensor([0.0] )
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean , 2 ) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar , dim=[1, 2, 3] , )
    def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=[1, 2, 3] ):
        """Negative log-likelihood of ``sample`` under this Gaussian.

        NOTE(review): the mutable default ``dims=[1, 2, 3]`` is only read, never
        mutated, so the shared-default pitfall does not bite here.
        """
        if self.deterministic:
            return torch.Tensor([0.0] )
        snake_case__ : Any = np.log(2.0 * np.pi )
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__SCREAMING_SNAKE_CASE )
    def __UpperCamelCase ( self ):
        """Mode of the distribution (its mean)."""
        return self.mean
| 38 | 1 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
UpperCamelCase__ : int = '\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n'
class _lowerCAmelCase ( unittest.TestCase, ToolTesterMixin ):
    """Exercise the ``text-question-answering`` tool, locally and through the
    remote (inference-endpoint) variant, against the Hugging Face blurb defined
    at module level (``UpperCamelCase__``).

    Fixes applied: the base class referenced an undefined name ``__A`` (the
    imported mixin is ``ToolTesterMixin``); the fixture bound both tools to
    throwaway locals while the test methods read ``self.tool`` /
    ``self.remote_tool``; the ``remote=`` flag and the text/result arguments
    referenced the undefined name ``_lowerCamelCase``; the ``Union[...]``
    annotations referenced names this module never imports.

    NOTE(review): every method shares the name ``UpperCAmelCase_`` (an artifact
    of mechanical renaming), so only the last definition survives at class
    creation; restoring distinct test names would change the class interface.
    """

    def UpperCAmelCase_ ( self ) -> None:
        # Fixture: keep both tool variants on the instance for the tests below.
        self.tool = load_tool("""text-question-answering""" )
        self.tool.setup()
        self.remote_tool = load_tool("""text-question-answering""" , remote=True )

    def UpperCAmelCase_ ( self ) -> None:
        # Local tool, positional arguments.
        result = self.tool(UpperCamelCase__ , """What did Hugging Face do in April 2021?""" )
        self.assertEqual(result , """launched the BigScience Research Workshop""" )

    def UpperCAmelCase_ ( self ) -> None:
        # Remote tool, positional arguments.
        result = self.remote_tool(UpperCamelCase__ , """What did Hugging Face do in April 2021?""" )
        self.assertEqual(result , """launched the BigScience Research Workshop""" )

    def UpperCAmelCase_ ( self ) -> None:
        # Local tool, keyword arguments.
        result = self.tool(text=UpperCamelCase__ , question="""What did Hugging Face do in April 2021?""" )
        self.assertEqual(result , """launched the BigScience Research Workshop""" )

    def UpperCAmelCase_ ( self ) -> None:
        # Remote tool, keyword arguments.
        result = self.remote_tool(text=UpperCamelCase__ , question="""What did Hugging Face do in April 2021?""" )
        self.assertEqual(result , """launched the BigScience Research Workshop""" )
| 385 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def UpperCAmelCase(split_dict) -> None:
    """A SplitDict must round-trip through its YAML-list representation.

    Fixes applied: the parameter was named ``a_`` while the parametrize
    argnames string says ``split_dict`` (pytest rejects the mismatch) and the
    body already read ``split_dict``; the length assertion compared an object
    to itself; ``reloaded`` was never bound (the round-tripped dict was stored
    in a dead local); the normalisation loop rebound a throwaway local instead
    of mutating ``split_info``; the ``Union[...]`` return annotation referenced
    a name this module never imports (NameError at definition time).
    """
    split_dict_yaml_list = split_dict._to_yaml_list()
    # one YAML entry per split
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info",
    [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")],
)
def UpperCAmelCase(split_info) -> None:
    """``asdict`` on a SplitDict must keep the (deprecated) ``dataset_name`` field.

    Fixes applied: the middle parametrize case passed the undefined name ``a_``
    where ``None`` belongs (NameError at import time); the parameter was named
    ``a_`` while the argnames string says ``split_info`` and the body reads
    ``split_info``; the asdict result was stored in a dead local but asserted
    via the undefined name ``split_dict_asdict``; the ``Dict`` return
    annotation referenced a name this module never imports.
    """
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 385 | 1 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCamelCase = {
'facebook/mask2former-swin-small-coco-instance': (
'https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
UpperCamelCase = logging.get_logger(__name__)
class _A ( UpperCAmelCase_ ):
    """Configuration class for Mask2Former: backbone config plus transformer
    pixel-/mask-decoder hyper-parameters and loss weights.

    NOTE(review): this file is machine-mangled — every ``self.<attr> = ...`` in
    ``__init__`` has been collapsed to a rebinding of the local
    ``__UpperCamelCase``, so none of the configuration attributes the model
    expects are actually set. Compare against the upstream
    ``Mask2FormerConfig`` before trusting any behavior here.
    """
    # model identifier used by the auto-config machinery
    lowercase_ : Optional[int] = '''mask2former'''
    # backbone families this config accepts
    lowercase_ : str = ['''swin''']
    # attribute aliases expected by shared utilities
    lowercase_ : Any = {'''hidden_size''': '''hidden_dim'''}
    def __init__( self : Dict , lowerCamelCase__ : Optional[Dict] = None , lowerCamelCase__ : int = 2_56 , lowerCamelCase__ : int = 2_56 , lowerCamelCase__ : int = 2_56 , lowerCamelCase__ : int = 10_24 , lowerCamelCase__ : str = "relu" , lowerCamelCase__ : int = 6 , lowerCamelCase__ : int = 10 , lowerCamelCase__ : int = 8 , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : int = 20_48 , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : int = 4 , lowerCamelCase__ : int = 2_55 , lowerCamelCase__ : int = 1_00 , lowerCamelCase__ : float = 0.1 , lowerCamelCase__ : float = 2.0 , lowerCamelCase__ : float = 5.0 , lowerCamelCase__ : float = 5.0 , lowerCamelCase__ : int = 1_25_44 , lowerCamelCase__ : float = 3.0 , lowerCamelCase__ : float = 0.75 , lowerCamelCase__ : float = 0.02 , lowerCamelCase__ : float = 1.0 , lowerCamelCase__ : bool = True , lowerCamelCase__ : List[int] = [4, 8, 16, 32] , lowerCamelCase__ : bool = None , **lowerCamelCase__ : Union[str, Any] , ):
        """Build the config, defaulting to a small Swin backbone when none is given."""
        if backbone_config is None:
            logger.info("""`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.""" )
            __UpperCamelCase : Union[str, Any] = CONFIG_MAPPING["""swin"""](
                image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=lowerCamelCase__ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
        # NOTE(review): both isinstance arguments are the same mangled name, so
        # this check is vacuously True — presumably it was
        # `isinstance(backbone_config, dict)` upstream; verify.
        if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
            # dict form: resolve the concrete backbone config class and hydrate it
            __UpperCamelCase : List[str] = backbone_config.pop("""model_type""" )
            __UpperCamelCase : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
            __UpperCamelCase : str = config_class.from_dict(lowerCamelCase__ )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '
                f'Supported model types: {",".join(self.backbones_supported )}' )
        __UpperCamelCase : Tuple = backbone_config
        __UpperCamelCase : Union[str, Any] = feature_size
        __UpperCamelCase : Union[str, Any] = mask_feature_size
        __UpperCamelCase : List[Any] = hidden_dim
        __UpperCamelCase : Tuple = encoder_feedforward_dim
        __UpperCamelCase : List[Any] = activation_function
        __UpperCamelCase : Tuple = encoder_layers
        __UpperCamelCase : Union[str, Any] = decoder_layers
        __UpperCamelCase : Union[str, Any] = num_attention_heads
        __UpperCamelCase : Union[str, Any] = dropout
        __UpperCamelCase : Dict = dim_feedforward
        __UpperCamelCase : Any = pre_norm
        __UpperCamelCase : List[Any] = enforce_input_projection
        __UpperCamelCase : str = common_stride
        __UpperCamelCase : Any = ignore_value
        __UpperCamelCase : Dict = num_queries
        __UpperCamelCase : int = no_object_weight
        __UpperCamelCase : List[str] = class_weight
        __UpperCamelCase : List[Any] = mask_weight
        __UpperCamelCase : List[str] = dice_weight
        __UpperCamelCase : Optional[int] = train_num_points
        __UpperCamelCase : List[Any] = oversample_ratio
        __UpperCamelCase : str = importance_sample_ratio
        __UpperCamelCase : Union[str, Any] = init_std
        __UpperCamelCase : List[Any] = init_xavier_std
        __UpperCamelCase : List[Any] = use_auxiliary_loss
        __UpperCamelCase : List[Any] = feature_strides
        __UpperCamelCase : Dict = output_auxiliary_logits
        # NOTE(review): `decoder_layers` is consumed a second time here —
        # presumably this last assignment originally set `num_hidden_layers`;
        # TODO confirm upstream.
        __UpperCamelCase : Optional[Any] = decoder_layers
        super().__init__(**lowerCamelCase__ )
    @classmethod
    def a ( cls : str , lowerCamelCase__ : PretrainedConfig , **lowerCamelCase__ : Any ):
        """Alternate constructor: build a config from a backbone config plus overrides."""
        return cls(
            backbone_config=lowerCamelCase__ , **lowerCamelCase__ , )
    def a ( self : str ):
        """Serialize this configuration (with the nested backbone config) to a dict.

        NOTE(review): `output` is never bound in this mangled body — this
        raises NameError at runtime; upstream builds the dict in `output`
        and writes the nested entries into it.
        """
        __UpperCamelCase : Optional[Any] = copy.deepcopy(self.__dict__ )
        __UpperCamelCase : Dict = self.backbone_config.to_dict()
        __UpperCamelCase : Dict = self.__class__.model_type
        return output
| 269 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
UpperCamelCase = 'CompVis/stable-diffusion-v1-1'
UpperCamelCase = 'CompVis/stable-diffusion-v1-2'
UpperCamelCase = 'CompVis/stable-diffusion-v1-3'
UpperCamelCase = 'CompVis/stable-diffusion-v1-4'
class _A ( UpperCAmelCase_ ):
def __init__( self : List[str] , lowerCamelCase__ : AutoencoderKL , lowerCamelCase__ : CLIPTextModel , lowerCamelCase__ : CLIPTokenizer , lowerCamelCase__ : UNetaDConditionModel , lowerCamelCase__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCamelCase__ : StableDiffusionSafetyChecker , lowerCamelCase__ : CLIPImageProcessor , lowerCamelCase__ : bool = True , ):
"""simple docstring"""
super()._init_()
__UpperCamelCase : Optional[Any] = StableDiffusionPipeline.from_pretrained(lowerCamelCase__ )
__UpperCamelCase : int = StableDiffusionPipeline.from_pretrained(lowerCamelCase__ )
__UpperCamelCase : List[str] = StableDiffusionPipeline.from_pretrained(lowerCamelCase__ )
__UpperCamelCase : str = StableDiffusionPipeline(
vae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , unet=lowerCamelCase__ , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , requires_safety_checker=lowerCamelCase__ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def a ( self : Optional[Any] ):
"""simple docstring"""
return {k: getattr(self , lowerCamelCase__ ) for k in self.config.keys() if not k.startswith("""_""" )}
def a ( self : List[str] , lowerCamelCase__ : Optional[Union[str, int]] = "auto" ):
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__UpperCamelCase : Dict = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase__ )
def a ( self : str ):
"""simple docstring"""
self.enable_attention_slicing(lowerCamelCase__ )
# NOTE(review): the four wrappers below forward identical generation arguments
# to the wrapped pipelines, and the final method runs all four checkpoints and
# bundles their first images.  In this obfuscated source every parameter shares
# the name `lowerCamelCase__` (not valid Python) and the final body references
# `height`, `width` and `resa`, which are no longer bound — the original
# parameter names were lost in a mechanical rename.  Code is kept
# byte-identical; only comments/docstrings were changed.
@torch.no_grad()
def a ( self : Any , lowerCamelCase__ : Union[str, List[str]] , lowerCamelCase__ : int = 5_12 , lowerCamelCase__ : int = 5_12 , lowerCamelCase__ : int = 50 , lowerCamelCase__ : float = 7.5 , lowerCamelCase__ : Optional[Union[str, List[str]]] = None , lowerCamelCase__ : Optional[int] = 1 , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : Optional[torch.Generator] = None , lowerCamelCase__ : Optional[torch.FloatTensor] = None , lowerCamelCase__ : Optional[str] = "pil" , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase__ : int = 1 , **lowerCamelCase__ : str , ):
    """Delegate one text-to-image generation call to the first wrapped pipeline."""
    return self.pipea(
        prompt=lowerCamelCase__ , height=lowerCamelCase__ , width=lowerCamelCase__ , num_inference_steps=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , negative_prompt=lowerCamelCase__ , num_images_per_prompt=lowerCamelCase__ , eta=lowerCamelCase__ , generator=lowerCamelCase__ , latents=lowerCamelCase__ , output_type=lowerCamelCase__ , return_dict=lowerCamelCase__ , callback=lowerCamelCase__ , callback_steps=lowerCamelCase__ , **lowerCamelCase__ , )

@torch.no_grad()
def a ( self : Union[str, Any] , lowerCamelCase__ : Union[str, List[str]] , lowerCamelCase__ : int = 5_12 , lowerCamelCase__ : int = 5_12 , lowerCamelCase__ : int = 50 , lowerCamelCase__ : float = 7.5 , lowerCamelCase__ : Optional[Union[str, List[str]]] = None , lowerCamelCase__ : Optional[int] = 1 , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : Optional[torch.Generator] = None , lowerCamelCase__ : Optional[torch.FloatTensor] = None , lowerCamelCase__ : Optional[str] = "pil" , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase__ : int = 1 , **lowerCamelCase__ : Dict , ):
    """Delegate one text-to-image generation call to the second wrapped pipeline."""
    return self.pipea(
        prompt=lowerCamelCase__ , height=lowerCamelCase__ , width=lowerCamelCase__ , num_inference_steps=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , negative_prompt=lowerCamelCase__ , num_images_per_prompt=lowerCamelCase__ , eta=lowerCamelCase__ , generator=lowerCamelCase__ , latents=lowerCamelCase__ , output_type=lowerCamelCase__ , return_dict=lowerCamelCase__ , callback=lowerCamelCase__ , callback_steps=lowerCamelCase__ , **lowerCamelCase__ , )

@torch.no_grad()
def a ( self : Union[str, Any] , lowerCamelCase__ : Union[str, List[str]] , lowerCamelCase__ : int = 5_12 , lowerCamelCase__ : int = 5_12 , lowerCamelCase__ : int = 50 , lowerCamelCase__ : float = 7.5 , lowerCamelCase__ : Optional[Union[str, List[str]]] = None , lowerCamelCase__ : Optional[int] = 1 , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : Optional[torch.Generator] = None , lowerCamelCase__ : Optional[torch.FloatTensor] = None , lowerCamelCase__ : Optional[str] = "pil" , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase__ : int = 1 , **lowerCamelCase__ : Any , ):
    """Delegate one text-to-image generation call to the third wrapped pipeline."""
    return self.pipea(
        prompt=lowerCamelCase__ , height=lowerCamelCase__ , width=lowerCamelCase__ , num_inference_steps=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , negative_prompt=lowerCamelCase__ , num_images_per_prompt=lowerCamelCase__ , eta=lowerCamelCase__ , generator=lowerCamelCase__ , latents=lowerCamelCase__ , output_type=lowerCamelCase__ , return_dict=lowerCamelCase__ , callback=lowerCamelCase__ , callback_steps=lowerCamelCase__ , **lowerCamelCase__ , )

@torch.no_grad()
def a ( self : Dict , lowerCamelCase__ : Union[str, List[str]] , lowerCamelCase__ : int = 5_12 , lowerCamelCase__ : int = 5_12 , lowerCamelCase__ : int = 50 , lowerCamelCase__ : float = 7.5 , lowerCamelCase__ : Optional[Union[str, List[str]]] = None , lowerCamelCase__ : Optional[int] = 1 , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : Optional[torch.Generator] = None , lowerCamelCase__ : Optional[torch.FloatTensor] = None , lowerCamelCase__ : Optional[str] = "pil" , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase__ : int = 1 , **lowerCamelCase__ : List[Any] , ):
    """Delegate one text-to-image generation call to the fourth wrapped pipeline."""
    return self.pipea(
        prompt=lowerCamelCase__ , height=lowerCamelCase__ , width=lowerCamelCase__ , num_inference_steps=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , negative_prompt=lowerCamelCase__ , num_images_per_prompt=lowerCamelCase__ , eta=lowerCamelCase__ , generator=lowerCamelCase__ , latents=lowerCamelCase__ , output_type=lowerCamelCase__ , return_dict=lowerCamelCase__ , callback=lowerCamelCase__ , callback_steps=lowerCamelCase__ , **lowerCamelCase__ , )

@torch.no_grad()
def a ( self : Optional[Any] , lowerCamelCase__ : Union[str, List[str]] , lowerCamelCase__ : int = 5_12 , lowerCamelCase__ : int = 5_12 , lowerCamelCase__ : int = 50 , lowerCamelCase__ : float = 7.5 , lowerCamelCase__ : Optional[Union[str, List[str]]] = None , lowerCamelCase__ : Optional[int] = 1 , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : Optional[torch.Generator] = None , lowerCamelCase__ : Optional[torch.FloatTensor] = None , lowerCamelCase__ : Optional[str] = "pil" , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase__ : int = 1 , **lowerCamelCase__ : Dict , ):
    """Run the same prompt through all four wrapped checkpoints and return their
    first images bundled in a single StableDiffusionPipelineOutput."""
    # Pick GPU when available, then move the whole composite pipeline there.
    __UpperCamelCase : Optional[Any] = """cuda""" if torch.cuda.is_available() else """cpu"""
    self.to(lowerCamelCase__ )
    # Checks if the height and width are divisible by 8 or not
    # NOTE(review): `height`/`width` below are unbound in this obfuscated copy
    # (the parameters were renamed to `lowerCamelCase__`).
    if height % 8 != 0 or width % 8 != 0:
        raise ValueError(f'`height` and `width` must be divisible by 8 but are {height} and {width}.' )
    # Get first result from Stable Diffusion Checkpoint v1.1
    __UpperCamelCase : Optional[int] = self.textaimg_sda_a(
        prompt=lowerCamelCase__ , height=lowerCamelCase__ , width=lowerCamelCase__ , num_inference_steps=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , negative_prompt=lowerCamelCase__ , num_images_per_prompt=lowerCamelCase__ , eta=lowerCamelCase__ , generator=lowerCamelCase__ , latents=lowerCamelCase__ , output_type=lowerCamelCase__ , return_dict=lowerCamelCase__ , callback=lowerCamelCase__ , callback_steps=lowerCamelCase__ , **lowerCamelCase__ , )
    # Get first result from Stable Diffusion Checkpoint v1.2
    __UpperCamelCase : int = self.textaimg_sda_a(
        prompt=lowerCamelCase__ , height=lowerCamelCase__ , width=lowerCamelCase__ , num_inference_steps=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , negative_prompt=lowerCamelCase__ , num_images_per_prompt=lowerCamelCase__ , eta=lowerCamelCase__ , generator=lowerCamelCase__ , latents=lowerCamelCase__ , output_type=lowerCamelCase__ , return_dict=lowerCamelCase__ , callback=lowerCamelCase__ , callback_steps=lowerCamelCase__ , **lowerCamelCase__ , )
    # Get first result from Stable Diffusion Checkpoint v1.3
    __UpperCamelCase : Optional[Any] = self.textaimg_sda_a(
        prompt=lowerCamelCase__ , height=lowerCamelCase__ , width=lowerCamelCase__ , num_inference_steps=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , negative_prompt=lowerCamelCase__ , num_images_per_prompt=lowerCamelCase__ , eta=lowerCamelCase__ , generator=lowerCamelCase__ , latents=lowerCamelCase__ , output_type=lowerCamelCase__ , return_dict=lowerCamelCase__ , callback=lowerCamelCase__ , callback_steps=lowerCamelCase__ , **lowerCamelCase__ , )
    # Get first result from Stable Diffusion Checkpoint v1.4
    __UpperCamelCase : List[Any] = self.textaimg_sda_a(
        prompt=lowerCamelCase__ , height=lowerCamelCase__ , width=lowerCamelCase__ , num_inference_steps=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , negative_prompt=lowerCamelCase__ , num_images_per_prompt=lowerCamelCase__ , eta=lowerCamelCase__ , generator=lowerCamelCase__ , latents=lowerCamelCase__ , output_type=lowerCamelCase__ , return_dict=lowerCamelCase__ , callback=lowerCamelCase__ , callback_steps=lowerCamelCase__ , **lowerCamelCase__ , )
    # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
    # NOTE(review): `resa` is unbound here — the four result variables above
    # were all renamed to the same obfuscated name.
    return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 269 | 1 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
snake_case_ : Any = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class A__ ( unittest.TestCase ):
    """Helper that holds Pix2Struct image-processor test configuration.

    NOTE(review): obfuscation renamed every __init__ parameter to `_a`
    (duplicate parameter names are not valid Python) while the body still
    reads the original names (`size`, `parent`, ...) and the attribute
    targets were dropped.  Code kept byte-identical; comments only.
    """
    def __init__( self : Tuple , _a : str , _a : List[str]=7 , _a : Union[str, Any]=3 , _a : List[str]=18 , _a : List[Any]=30 , _a : Optional[Any]=400 , _a : Dict=None , _a : Union[str, Any]=True , _a : List[str]=True , _a : Optional[Any]=None , ) -> Dict:
        """Record sizes, resolutions, patch settings and normalization flags."""
        _SCREAMING_SNAKE_CASE =size if size is not None else {'''height''': 20, '''width''': 20}
        _SCREAMING_SNAKE_CASE =parent
        _SCREAMING_SNAKE_CASE =batch_size
        _SCREAMING_SNAKE_CASE =num_channels
        _SCREAMING_SNAKE_CASE =image_size
        _SCREAMING_SNAKE_CASE =min_resolution
        _SCREAMING_SNAKE_CASE =max_resolution
        _SCREAMING_SNAKE_CASE =size
        _SCREAMING_SNAKE_CASE =do_normalize
        _SCREAMING_SNAKE_CASE =do_convert_rgb
        # Candidate `max_patches` values exercised by the tests below.
        _SCREAMING_SNAKE_CASE =[512, 1024, 2048, 4096]
        _SCREAMING_SNAKE_CASE =patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}

    def __UpperCamelCase ( self : int ) -> Any:
        """Return the kwargs used to construct the image processor under test."""
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def __UpperCamelCase ( self : str ) -> Optional[int]:
        """Download and return a fixed RGB sample image (requires network)."""
        _SCREAMING_SNAKE_CASE ='''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
        _SCREAMING_SNAKE_CASE =Image.open(requests.get(_a , stream=_a ).raw ).convert('''RGB''' )
        return raw_image
# Tests for Pix2StructImageProcessor (3-channel inputs): PIL, numpy and torch
# variants, with and without rendered header text.
# NOTE(review): the mixin base class name was obfuscated to `UpperCamelCase__`
# and is unbound here; code kept byte-identical, comments only.
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class A__ ( UpperCamelCase__ , unittest.TestCase ):
    UpperCAmelCase = PixaStructImageProcessor if is_vision_available() else None

    def __UpperCamelCase ( self : int ) -> Dict:
        """Build the shared tester fixture."""
        _SCREAMING_SNAKE_CASE =PixaStructImageProcessingTester(self )

    @property
    def __UpperCamelCase ( self : int ) -> Any:
        """Kwargs for constructing the processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def __UpperCamelCase ( self : str ) -> Dict:
        """The processor exposes its configuration flags as attributes."""
        _SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_a , '''do_normalize''' ) )
        self.assertTrue(hasattr(_a , '''do_convert_rgb''' ) )

    def __UpperCamelCase ( self : int ) -> Optional[int]:
        """Processing a known image yields a stable flattened-patch mean."""
        _SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_dummy_image()
        _SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
        _SCREAMING_SNAKE_CASE =2048
        _SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='''pt''' , max_patches=_a )
        # Regression value for the fixed sample image.
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.06_06 ) , atol=1E-3 , rtol=1E-3 ) )

    def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
        """PIL inputs: output shape is (batch, max_patches, patch_h*patch_w*C + 2)."""
        _SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        _SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
        for image in image_inputs:
            self.assertIsInstance(_a , Image.Image )
        # Test not batched input
        # +2 accounts for the row/column index features appended per patch.
        _SCREAMING_SNAKE_CASE =(
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            _SCREAMING_SNAKE_CASE =image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=_a ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            _SCREAMING_SNAKE_CASE =image_processor(
                _a , return_tensors='''pt''' , max_patches=_a ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )

    def __UpperCamelCase ( self : Dict ) -> List[Any]:
        """VQA mode: header text must be supplied; shapes are unchanged."""
        _SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        _SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
        for image in image_inputs:
            self.assertIsInstance(_a , Image.Image )
        # Test not batched input
        _SCREAMING_SNAKE_CASE =(
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2
        _SCREAMING_SNAKE_CASE =True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            # Missing header_text must raise.
            with self.assertRaises(_a ):
                _SCREAMING_SNAKE_CASE =image_processor(
                    image_inputs[0] , return_tensors='''pt''' , max_patches=_a ).flattened_patches
            _SCREAMING_SNAKE_CASE ='''Hello'''
            _SCREAMING_SNAKE_CASE =image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=_a , header_text=_a ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            _SCREAMING_SNAKE_CASE =image_processor(
                _a , return_tensors='''pt''' , max_patches=_a , header_text=_a ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )

    def __UpperCamelCase ( self : List[Any] ) -> List[Any]:
        """Numpy inputs behave identically to PIL inputs."""
        _SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        _SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
        for image in image_inputs:
            self.assertIsInstance(_a , np.ndarray )
        _SCREAMING_SNAKE_CASE =(
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            _SCREAMING_SNAKE_CASE =image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=_a ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            _SCREAMING_SNAKE_CASE =image_processor(
                _a , return_tensors='''pt''' , max_patches=_a ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )

    def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
        """Torch-tensor inputs behave identically to PIL inputs."""
        _SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        _SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
        for image in image_inputs:
            self.assertIsInstance(_a , torch.Tensor )
        # Test not batched input
        _SCREAMING_SNAKE_CASE =(
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            _SCREAMING_SNAKE_CASE =image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=_a ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            _SCREAMING_SNAKE_CASE =image_processor(
                _a , return_tensors='''pt''' , max_patches=_a ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
# 4-channel (e.g. RGBA) variant: with do_convert_rgb the processor drops the
# extra channel, so the expected hidden dim uses (num_channels - 1).
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class A__ ( UpperCamelCase__ , unittest.TestCase ):
    UpperCAmelCase = PixaStructImageProcessor if is_vision_available() else None

    def __UpperCamelCase ( self : Optional[int] ) -> List[Any]:
        """Build a tester with 4 input channels and 3 expected output channels."""
        _SCREAMING_SNAKE_CASE =PixaStructImageProcessingTester(self , num_channels=4 )
        _SCREAMING_SNAKE_CASE =3

    @property
    def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
        """Kwargs for constructing the processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def __UpperCamelCase ( self : Dict ) -> Optional[int]:
        """The processor exposes its configuration flags as attributes."""
        _SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_a , '''do_normalize''' ) )
        self.assertTrue(hasattr(_a , '''do_convert_rgb''' ) )

    def __UpperCamelCase ( self : Dict ) -> Dict:
        """PIL inputs: shapes reflect the RGB-converted (3-channel) image."""
        _SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        _SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
        for image in image_inputs:
            self.assertIsInstance(_a , Image.Image )
        # Test not batched input
        _SCREAMING_SNAKE_CASE =(
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            _SCREAMING_SNAKE_CASE =image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=_a ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            _SCREAMING_SNAKE_CASE =image_processor(
                _a , return_tensors='''pt''' , max_patches=_a ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
# Tests for the LXMERT tokenizer (WordPiece, BERT-style vocab).
# NOTE(review): the mixin base class name was obfuscated to `UpperCamelCase__`
# and is unbound here; code kept byte-identical, comments only.
@require_tokenizers
class A__ ( UpperCamelCase__ , unittest.TestCase ):
    UpperCAmelCase = LxmertTokenizer
    UpperCAmelCase = LxmertTokenizerFast
    UpperCAmelCase = True
    UpperCAmelCase = True

    def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
        """Write a tiny WordPiece vocabulary to a temp dir for the tests."""
        super().setUp()
        _SCREAMING_SNAKE_CASE =[
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        _SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

    def __UpperCamelCase ( self : List[str] , _a : Tuple ) -> Optional[Any]:
        """Return an (input, expected-normalized-output) text pair."""
        _SCREAMING_SNAKE_CASE ='''UNwant\u00E9d,running'''
        _SCREAMING_SNAKE_CASE ='''unwanted, running'''
        return input_text, output_text

    def __UpperCamelCase ( self : Tuple ) -> List[Any]:
        """Tokenization splits into the expected WordPiece pieces and ids."""
        _SCREAMING_SNAKE_CASE =self.tokenizer_class(self.vocab_file )
        _SCREAMING_SNAKE_CASE =tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(_a , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [7, 4, 5, 10, 8, 9] )

    def __UpperCamelCase ( self : Dict ) -> List[Any]:
        """Slow and fast tokenizers agree on tokens and encodings."""
        if not self.test_rust_tokenizer:
            return
        _SCREAMING_SNAKE_CASE =self.get_tokenizer()
        _SCREAMING_SNAKE_CASE =self.get_rust_tokenizer()
        _SCREAMING_SNAKE_CASE ='''I was born in 92000, and this is falsé.'''
        _SCREAMING_SNAKE_CASE =tokenizer.tokenize(_a )
        _SCREAMING_SNAKE_CASE =rust_tokenizer.tokenize(_a )
        self.assertListEqual(_a , _a )
        _SCREAMING_SNAKE_CASE =tokenizer.encode(_a , add_special_tokens=_a )
        _SCREAMING_SNAKE_CASE =rust_tokenizer.encode(_a , add_special_tokens=_a )
        self.assertListEqual(_a , _a )
        _SCREAMING_SNAKE_CASE =self.get_rust_tokenizer()
        _SCREAMING_SNAKE_CASE =tokenizer.encode(_a )
        _SCREAMING_SNAKE_CASE =rust_tokenizer.encode(_a )
        self.assertListEqual(_a , _a )
class __lowercase :
    """A named item with a value and a weight, consumed by the greedy
    knapsack helpers in this module (``build_menu`` / ``greedy``).

    BUG FIXES: the obfuscated original repeated one parameter name three
    times in ``__init__`` (a SyntaxError) and named all four accessors
    ``_lowercase`` so each definition overrode the previous one, while the
    greedy function calls them as ``get_weight``/``get_value``.  The
    accessors are restored to the names the callers actually use.
    """

    def __init__(self, name, value, weight):
        # name: item label; value: worth; weight: cost against the budget.
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        """E.g. ``__lowercase(apple, 10, 5)``."""
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        """Return the item's value."""
        return self.value

    def get_name(self):
        """Return the item's name."""
        return self.name

    def get_weight(self):
        """Return the item's weight."""
        return self.weight

    def value_weight(self):
        """Return the value-to-weight ratio (a common greedy sort key)."""
        return self.value / self.weight


# The helper functions in this module refer to the class as ``Things``;
# provide that name explicitly (the class was mechanically renamed).
Things = __lowercase
def _UpperCamelCase(name, value, weight):
    """Build a list of ``Things`` from parallel name/value/weight lists.

    BUG FIX: the obfuscated signature repeated one parameter name three
    times (a SyntaxError); the three distinct parameters are restored from
    how the body indexes them.
    """
    return [Things(name[i], value[i], weight[i]) for i in range(len(name))]
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ->Union[str, Any]:
UpperCAmelCase = sorted(lowerCAmelCase_ , key=lowerCAmelCase_ , reverse=lowerCAmelCase_ )
UpperCAmelCase = []
UpperCAmelCase , UpperCAmelCase = 0.0, 0.0
for i in range(len(lowerCAmelCase_ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def _UpperCamelCase( ) ->None:
    """Placeholder hook (intentionally does nothing; returns None)."""
    pass
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 377 |
from __future__ import annotations
import math
def _UpperCamelCase ( lowerCAmelCase_ ) ->bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
# All odd composite numbers below 100001 — the search space for Goldbach's
# "other" conjecture (Project Euler 46).
# NOTE(review): `is_prime` must be bound at module level before this line
# runs; in this obfuscated copy the predicate's def was renamed.
__a = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def _UpperCamelCase(lowerCAmelCase_) -> list[int]:
    """Return the first n odd composite numbers (in increasing order) that
    cannot be written as prime + 2 * k**2.

    Raises:
        ValueError: if the argument is not an integer or is <= 0.
    """
    n = lowerCAmelCase_
    # BUG FIX: the original check was `isinstance(n, n)` — the second
    # argument of isinstance must be a type, so it raised TypeError for
    # every call; the intended check is against int.
    if not isinstance(n, int):
        raise ValueError("""n must be an integer""")
    if n <= 0:
        raise ValueError("""n must be >= 0""")
    list_nums = []
    # BUG FIX: the original body referenced an undefined name
    # `odd_composites`; the module-level odd-composite list is `__a`.
    for num in range(len(__a)):
        i = 0
        while 2 * i * i <= __a[num]:
            rem = __a[num] - 2 * i * i
            if is_prime(rem):
                # This candidate decomposes as prime + 2*i*i — not a counterexample.
                break
            i += 1
        else:
            list_nums.append(__a[num])
        if len(list_nums) == n:
            return list_nums
    return []


# ``solution()`` below calls this function as ``compute_nums``; bind that
# name as well (the def was mechanically renamed).
compute_nums = _UpperCamelCase
def _UpperCamelCase() -> int:
    """Project Euler 46: smallest odd composite that is not the sum of a
    prime and twice a square."""
    return compute_nums(1)[0]


# BUG FIX: the __main__ guard below calls this as ``solution``; bind that
# name as well (the def was mechanically renamed).
solution = _UpperCamelCase
# Print the Project Euler 46 answer when run as a script.
# NOTE(review): `solution` must be bound at module level; in this obfuscated
# copy the def above was renamed to `_UpperCamelCase`.
if __name__ == "__main__":
    print(F"""{solution() = }""")
| 377 | 1 |
import math
import os
import sys
def SCREAMING_SNAKE_CASE_(UpperCamelCase__) -> str:
    """Read the file at the given path and return its contents as a string
    of '0'/'1' characters (8 bits per byte, most significant bit first).

    Exits the process with an error message if the file cannot be opened.
    """
    try:
        with open(UpperCamelCase__, '''rb''') as binary_file:
            data = binary_file.read()
        # PERF FIX: join once instead of quadratic `result += curr_byte`.
        return "".join(f'''{dat:08b}''' for dat in data)
    except OSError:
        print('''File not accessible''')
        sys.exit()


# Sibling functions in this module call this as ``read_file_binary``; bind
# that name as well (the def was mechanically renamed).
read_file_binary = SCREAMING_SNAKE_CASE_
def SCREAMING_SNAKE_CASE_(lexicon, curr_string, index, last_match_id) -> None:
    """Grow the LZ lexicon in place after ``last_match_id`` was emitted for
    ``curr_string``.

    ``curr_string`` is replaced by its two one-bit extensions:
    ``curr_string + "0"`` keeps the id just emitted and
    ``curr_string + "1"`` receives the binary form of the new ``index``.
    Whenever ``index`` reaches a power of two, every existing code word
    gains a leading '0' so all ids stay the same bit width.

    BUG FIXES: the obfuscated signature repeated one parameter name four
    times (a SyntaxError), the dictionary-assignment targets were dropped,
    and ``math.loga`` is not a function (``math.log2`` was mangled);
    reconstructed from the surviving right-hand sides.
    """
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        # Re-assigning existing keys while iterating is safe (no size change).
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]


# Sibling functions call this as ``add_key_to_lexicon``; bind that name as
# well (the def was mechanically renamed).
add_key_to_lexicon = SCREAMING_SNAKE_CASE_
def SCREAMING_SNAKE_CASE_(data_bits) -> str:
    """LZ-compress a string of '0'/'1' bits and return the compressed bit
    string, growing the lexicon via ``add_key_to_lexicon`` after each match.

    BUG FIXES: the obfuscated copy assigned the 2-tuple ``"", ""`` to a
    single variable (so the accumulator would have been a tuple) and dropped
    several assignment targets; reconstructed so that ``result`` accumulates
    emitted ids, ``curr_string`` is the pending match, and ``index`` starts
    at the lexicon size.
    """
    lexicon = {'''0''': '''0''', '''1''': '''1'''}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    # Trailing bits that never formed a full match: pad with '0's until a
    # known code word is reached, then emit it.
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result


# Sibling ``compress`` calls this as ``compress_data``; bind that name as
# well (the def was mechanically renamed).
compress_data = SCREAMING_SNAKE_CASE_
def SCREAMING_SNAKE_CASE_(source_path, compressed) -> str:
    """Prefix ``compressed`` with a self-delimiting encoding of the source
    file's byte size: (L - 1) zeros followed by the L-bit binary size.

    BUG FIX: the obfuscated signature repeated one parameter name twice
    (a SyntaxError); the two distinct parameters are restored from the body.
    """
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed


# Sibling ``compress`` calls this as ``add_file_length``; bind that name as
# well (the def was mechanically renamed).
add_file_length = SCREAMING_SNAKE_CASE_
def SCREAMING_SNAKE_CASE_(file_path, to_write) -> None:
    """Pack the bit string ``to_write`` into bytes and write them to
    ``file_path``.

    The stream is padded to a byte boundary with a single '1' followed by
    zeros (a whole extra ``10000000`` byte when already aligned) so a reader
    can locate the true end of the data.  Exits the process if the file
    cannot be opened.

    BUG FIX: the obfuscated signature repeated one parameter name twice
    (a SyntaxError); the two distinct parameters are restored from the body.
    NOTE(review): an empty ``to_write`` would raise IndexError on the
    ``[-1]`` access, as in the original — TODO confirm whether callers can
    produce an empty bit string.
    """
    byte_length = 8
    try:
        with open(file_path, '''wb''') as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append('''10000000''')
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder='''big'''))
    except OSError:
        print('''File not accessible''')
        sys.exit()


# Sibling ``compress`` calls this as ``write_file_binary``; bind that name
# as well (the def was mechanically renamed).
write_file_binary = SCREAMING_SNAKE_CASE_
def SCREAMING_SNAKE_CASE_(source_path, destination_path) -> None:
    """Compress ``source_path`` into ``destination_path``: read the source
    as bits, LZ-compress, prepend the length header, and write the packed
    bytes.

    BUG FIX: the obfuscated signature repeated one parameter name twice
    (a SyntaxError); the two distinct parameters are restored from the body.
    """
    data = read_file_binary(source_path)
    compressed = compress_data(data)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


# The __main__ guard below calls this as ``compress``; bind that name as
# well (the def was mechanically renamed).
compress = SCREAMING_SNAKE_CASE_
# CLI entry point: ``python <script> <source> <destination>``.
# NOTE(review): `compress` must be bound at module level; in this obfuscated
# copy the def above was renamed to `SCREAMING_SNAKE_CASE_`.
if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
| 711 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class _lowerCamelCase :
    """Config/input builder and shared checks for the TF DeiT model tests."""
    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_3 , __SCREAMING_SNAKE_CASE=3_0 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3_7 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1_0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=2 , ) -> Union[str, Any]:
        """Record the model hyper-parameters used by every check below.

        NOTE(review): obfuscation renamed all parameters to the same name
        (not valid Python) while the body still reads the original names
        (`parent`, `batch_size`, ...).  Code kept byte-identical.
        """
        UpperCamelCase__ : List[Any] = parent
        UpperCamelCase__ : Optional[int] = batch_size
        UpperCamelCase__ : Optional[Any] = image_size
        UpperCamelCase__ : Optional[int] = patch_size
        UpperCamelCase__ : Any = num_channels
        UpperCamelCase__ : Optional[Any] = is_training
        UpperCamelCase__ : Dict = use_labels
        UpperCamelCase__ : Optional[int] = hidden_size
        UpperCamelCase__ : str = num_hidden_layers
        UpperCamelCase__ : int = num_attention_heads
        UpperCamelCase__ : List[Any] = intermediate_size
        UpperCamelCase__ : int = hidden_act
        UpperCamelCase__ : int = hidden_dropout_prob
        UpperCamelCase__ : Optional[Any] = attention_probs_dropout_prob
        UpperCamelCase__ : Tuple = type_sequence_label_size
        UpperCamelCase__ : Optional[int] = initializer_range
        UpperCamelCase__ : List[str] = scope
        UpperCamelCase__ : str = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        UpperCamelCase__ : Any = (image_size // patch_size) ** 2
        UpperCamelCase__ : Any = num_patches + 2

    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        """Return (config, pixel_values, labels) for a test run."""
        UpperCamelCase__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCamelCase__ : Dict = None
        if self.use_labels:
            UpperCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        UpperCamelCase__ : List[Any] = self.get_config()
        return config, pixel_values, labels

    def __SCREAMING_SNAKE_CASE ( self ) -> str:
        """Build a DeiTConfig from the recorded hyper-parameters."""
        return DeiTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )

    def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
        """Base model: last hidden state is (batch, seq_len, hidden)."""
        UpperCamelCase__ : Tuple = TFDeiTModel(config=__SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : Optional[int] = model(__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[Any]:
        """Masked-image-modeling head reconstructs images, incl. greyscale."""
        UpperCamelCase__ : str = TFDeiTForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : List[str] = model(__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        UpperCamelCase__ : Optional[int] = 1
        UpperCamelCase__ : Any = TFDeiTForMaskedImageModeling(__SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        UpperCamelCase__ : Any = model(__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )

    def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
        """Classification head emits per-class logits, incl. greyscale."""
        UpperCamelCase__ : Optional[Any] = self.type_sequence_label_size
        UpperCamelCase__ : Optional[Any] = TFDeiTForImageClassification(__SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        UpperCamelCase__ : int = 1
        UpperCamelCase__ : int = TFDeiTForImageClassification(__SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        UpperCamelCase__ : Optional[Any] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        """Return (config, inputs_dict) for the common test mixin."""
        UpperCamelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
        UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Any = config_and_inputs
        UpperCamelCase__ : Optional[int] = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
    # Model classes and pipeline mapping exercised by this test case.
    SCREAMING_SNAKE_CASE_ = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    SCREAMING_SNAKE_CASE_ = (
        {
            '''feature-extraction''': TFDeiTModel,
            '''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )
    # Common-test feature toggles (resize embeddings, head masking, etc.).
    SCREAMING_SNAKE_CASE_ = False
    SCREAMING_SNAKE_CASE_ = False
    SCREAMING_SNAKE_CASE_ = False
    SCREAMING_SNAKE_CASE_ = False

    def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        """Create the model tester and a text-free config tester."""
        UpperCamelCase__ : List[Any] = TFDeiTModelTester(self )
        UpperCamelCase__ : str = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )

    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''DeiT does not use inputs_embeds''' )
    def __SCREAMING_SNAKE_CASE ( self ) -> str:
        """Skipped: DeiT consumes pixel values, not input embeddings."""
        pass

    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        """Input embeddings are a Keras layer; output embeddings a Dense (or None)."""
        UpperCamelCase__ ,UpperCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase__ : List[Any] = model_class(__SCREAMING_SNAKE_CASE )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            UpperCamelCase__ : Dict = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , tf.keras.layers.Dense ) )

    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        """Every model's call() takes `pixel_values` as its first argument."""
        UpperCamelCase__ ,UpperCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase__ : Optional[Any] = model_class(__SCREAMING_SNAKE_CASE )
            UpperCamelCase__ : str = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCamelCase__ : str = [*signature.parameters.keys()]
            UpperCamelCase__ : int = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )

    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        """Forward pass of the base model."""
        UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )

    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        """Forward pass of the masked-image-modeling head."""
        UpperCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE )

    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        """Forward pass of the image-classification head."""
        UpperCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ) -> str:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = super()._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
"""simple docstring"""
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Any = TFDeiTModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( ):
    """Load and return the COCO fixture image used by the integration test below.

    Fix: the original assigned the opened image to a throwaway mangled name and
    then returned the undefined name ``image`` (NameError at call time).
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
    """Slow integration test: run a pretrained TF DeiT distilled classifier on the
    COCO fixture image and check logit shape plus the first three logit values.

    NOTE(review): ``__SCREAMING_SNAKE_CASE`` is referenced as a value in several
    calls without being defined — identifiers look machine-mangled; compare with
    upstream ``tests/models/deit/test_modeling_tf_deit.py``.
    """

    @cached_property
    def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        """Default DeiT image processor, or ``None`` when vision deps are missing."""
        return (
            DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
            if is_vision_available()
            else None
        )

    @slow
    def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
        """Forward the fixture image through the pretrained model and verify logits."""
        UpperCamelCase__ : Union[str, Any] = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
        UpperCamelCase__ : str = self.default_image_processor
        UpperCamelCase__ : List[str] = prepare_img()
        UpperCamelCase__ : int = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''tf''' )
        # forward pass
        UpperCamelCase__ : List[Any] = model(**__SCREAMING_SNAKE_CASE )
        # verify the logits
        UpperCamelCase__ : Union[str, Any] = tf.TensorShape((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : List[str] = tf.constant([-1.0266, 0.1912, -1.2861] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 462 | 0 |
"""Lazy-import structure for the CLIP model family (configs, processors,
tokenizers, and the PyTorch / TensorFlow / Flax model implementations).

Fix: the original assigned every piece to the single mangled name
``__UpperCamelCase`` — each optional-backend list clobbered the import-structure
dict — and the final ``_LazyModule(...)`` call referenced ``_import_structure``,
which was never defined, then bound the proxy to a dead name instead of
replacing the module in ``sys.modules``.  Restored to the canonical
transformers lazy-init pattern.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Submodules that are importable regardless of which backends are installed.
_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

# Each optional backend appends its submodule only when available.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static imports so type checkers see the full API.
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 | """simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE__ : Any =logging.get_logger(__name__)
class _UpperCAmelCase ( a_ ):
    """CLAP-style audio feature extractor: turns raw waveforms into (optionally
    fused) log-mel spectrogram features.

    Fixes over the original mangled block:
    * every method signature used one duplicated parameter name (``_lowercase``)
      — duplicate argument names are a ``SyntaxError`` — while the bodies
      referenced the real names; signatures are restored from those references;
    * all five methods collided on the name ``a__`` / lost their real names,
      yet the bodies call ``self._np_extract_fbank_features``,
      ``self._random_mel_fusion`` and ``self._get_input_mel`` — those surviving
      call sites fix the correct method names;
    * instance attributes were assigned to throwaway locals instead of
      ``self.<attr>``; the attribute names are restored from the ``self.*``
      reads elsewhere in the class (``self.mel_filters``, ``self.hop_length``,
      ``self.nb_max_samples``, ...).
    """

    # Keys produced by __call__ (upstream attribute name: ``model_input_names``).
    __snake_case = ["""input_features""", """is_longer"""]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ) -> Union[str, Any]:
        """Configure the extractor and precompute the two mel filter banks
        (HTK-scaled for "fusion" features, Slaney-scaled otherwise)."""
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        # Number of positive-frequency bins of an fft_window_size FFT.
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        # Maximum number of samples corresponding to max_length_s seconds.
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm=None , mel_scale='''htk''' , )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , )

    def to_dict(self) -> Dict[str, Any]:
        """Serializable copy of the config without the large filter-bank arrays."""
        output = copy.deepcopy(self.__dict__ )
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform, mel_filters=None) -> np.ndarray:
        """Compute a dB-scaled log-mel spectrogram (time-major) for one waveform."""
        log_mel_spectrogram = spectrogram(
            waveform , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=mel_filters , log_mel='''dB''' , )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames) -> Optional[Any]:
        """Stack a bilinear-shrunk copy of the full mel with three random chunks
        taken from the front / middle / back thirds of the spectrogram."""
        ranges = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0] )
        idx_middle = np.random.choice(ranges[1] )
        idx_back = np.random.choice(ranges[2] )
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel_shrink = torch.tensor(mel[None, None, :] )
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=False )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
        return mel_fusion

    def _get_input_mel(self, waveform, max_length, truncation, padding) -> np.array:
        """Truncate/pad one waveform to ``max_length`` samples and extract its mel
        features.  Returns ``(input_mel, longer)`` where ``longer`` flags that the
        audio exceeded ``max_length`` and random fusion was applied."""
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform ) - max_length
                idx = np.random.randint(0 , overflow + 1 )
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel] , axis=0 )
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel , total_frames , chunk_frames )
                    longer = True
            else:
                raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat ) )
                waveform = np.pad(waveform , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
            else:
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]
        return input_mel, longer

    def __call__(
        self,
        raw_speech,
        truncation=None,
        padding=None,
        max_length=None,
        sampling_rate=None,
        return_tensors=None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one waveform or a batch of waveforms into a ``BatchFeature``
        with ``input_features`` and per-sample ``is_longer`` flags."""
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
                    F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
                    F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float64 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float64 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float64 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech )]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform , max_length if max_length else self.nb_max_samples , truncation , padding )
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel )
            is_longer.append(longer )
        if truncation == "fusion" and sum(is_longer ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0 , len(input_mel ) )
            is_longer[rand_idx] = True
        if isinstance(input_mel[0] , List ):
            input_mel = [np.asarray(feature , dtype=np.float64 ) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {'''input_features''': input_mel, '''is_longer''': is_longer}
        input_features = BatchFeature(input_features )
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors )
        return input_features
| 434 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    # Fix: the original assigned "platform" to a dead mangled name (leaving the
    # imported ``os`` unused), so the allocator setting described above never
    # took effect; it must be exported through the environment before JAX init.
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp

    from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
        FlaxBlenderbotSmallForConditionalGeneration,
        FlaxBlenderbotSmallModel,
        shift_tokens_right,
    )
def lowercase__ ( lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : int=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : Dict=None , lowerCAmelCase : str=None , lowerCAmelCase : Optional[Any]=None , ) -> Any:
"""simple docstring"""
if attention_mask is None:
UpperCAmelCase = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
UpperCAmelCase = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
UpperCAmelCase = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class _UpperCAmelCase :
    """Model tester that builds small BlenderbotSmall configs/inputs and checks
    cached (incremental) decoding against full decoding.

    NOTE(review): this block is machine-mangled — ``__init__`` repeats the
    parameter name ``lowercase_`` eighteen times (duplicate argument names are
    a ``SyntaxError``), the two ``check_use_cache_*`` methods repeat their three
    parameters the same way, and ``_a`` is referenced throughout without being
    defined.  Restore names from upstream
    ``tests/models/blenderbot_small/test_modeling_flax_blenderbot_small.py``.
    """

    def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=False , lowercase_=9_9 , lowercase_=1_6 , lowercase_=2 , lowercase_=4 , lowercase_=4 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=3_2 , lowercase_=2 , lowercase_=1 , lowercase_=0 , lowercase_=0.0_2 , ) -> Any:
        # Mangled signature; the assignments below reveal the intended
        # parameters (parent, batch_size, seq_length, ...).
        UpperCAmelCase = parent
        UpperCAmelCase = batch_size
        UpperCAmelCase = seq_length
        UpperCAmelCase = is_training
        UpperCAmelCase = use_labels
        UpperCAmelCase = vocab_size
        UpperCAmelCase = hidden_size
        UpperCAmelCase = num_hidden_layers
        UpperCAmelCase = num_attention_heads
        UpperCAmelCase = intermediate_size
        UpperCAmelCase = hidden_act
        UpperCAmelCase = hidden_dropout_prob
        UpperCAmelCase = attention_probs_dropout_prob
        UpperCAmelCase = max_position_embeddings
        UpperCAmelCase = eos_token_id
        UpperCAmelCase = pad_token_id
        UpperCAmelCase = bos_token_id
        UpperCAmelCase = initializer_range

    def a_ ( self ) -> Tuple:
        """Build a small config plus encoder/decoder inputs (decoder ids are the
        right-shifted encoder ids; every row ends with EOS token 2)."""
        UpperCAmelCase = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        UpperCAmelCase = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
        UpperCAmelCase = shift_tokens_right(_a , 1 , 2 )
        UpperCAmelCase = BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_a , )
        UpperCAmelCase = prepare_blenderbot_inputs_dict(_a , _a , _a )
        return config, inputs_dict

    def a_ ( self ) -> int:
        """Alias used by the common test mixin; returns (config, inputs_dict)."""
        UpperCAmelCase = self.prepare_config_and_inputs()
        return config, inputs_dict

    def a_ ( self , lowercase_ , lowercase_ , lowercase_ ) -> str:
        """Check that decoding with an init_cache'd KV cache matches a full decode
        (max diff < 1e-3 on the last position's first five logits)."""
        UpperCAmelCase = 2_0
        UpperCAmelCase = model_class_name(_a )
        UpperCAmelCase = model.encode(inputs_dict['input_ids'] )
        UpperCAmelCase = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )
        UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , _a , _a )
        UpperCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
        UpperCAmelCase = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        UpperCAmelCase = model.decode(
            decoder_input_ids[:, :-1] , _a , decoder_attention_mask=_a , past_key_values=_a , decoder_position_ids=_a , )
        UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
        UpperCAmelCase = model.decode(
            decoder_input_ids[:, -1:] , _a , decoder_attention_mask=_a , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_a , )
        UpperCAmelCase = model.decode(_a , _a )
        UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=F"Max diff is {diff}" )

    def a_ ( self , lowercase_ , lowercase_ , lowercase_ ) -> Optional[Any]:
        """Same cached-vs-full decoding check, but with an explicit decoder
        attention mask padded out to the cache length."""
        UpperCAmelCase = 2_0
        UpperCAmelCase = model_class_name(_a )
        UpperCAmelCase = model.encode(inputs_dict['input_ids'] )
        UpperCAmelCase = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )
        UpperCAmelCase = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , _a , _a )
        UpperCAmelCase = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        UpperCAmelCase = model.decode(
            decoder_input_ids[:, :-1] , _a , decoder_attention_mask=_a , past_key_values=_a , decoder_position_ids=_a , )
        UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
        UpperCAmelCase = model.decode(
            decoder_input_ids[:, -1:] , _a , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_a , decoder_position_ids=_a , )
        UpperCAmelCase = model.decode(_a , _a , decoder_attention_mask=_a )
        UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=F"Max diff is {diff}" )
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
    """Standalone BlenderbotSmall tests: LM-head output shapes and the
    ``shift_tokens_right`` helper.

    NOTE(review): machine-mangled identifiers — the class attribute is named
    ``__SCREAMING_SNAKE_CASE`` but the code reads ``self.vocab_size`` (upstream:
    ``vocab_size = 99``), all methods share the name ``a_`` (later defs shadow
    earlier ones), and ``_a`` is referenced without being defined.
    """

    # Mangled: upstream this is ``vocab_size = 99``.
    __SCREAMING_SNAKE_CASE : Dict = 9_9

    def a_ ( self ) -> Dict:
        """Return a fixed 13x7 batch of token ids (one row padded), a small
        config, and the batch size."""
        UpperCAmelCase = np.array(
            [
                [7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
                [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
                [5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
                [7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
                [8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
                [5_5, 1_3, 1_6, 5_8, 5, 2, 1],  # note padding
                [6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
                [5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
                [4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
                [2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
                [2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
                [4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
                [7_0, 7_0, 5_0, 9, 2_8, 0, 2],
            ] , dtype=np.intaa , )
        UpperCAmelCase = input_ids.shape[0]
        UpperCAmelCase = BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size

    def a_ ( self ) -> Optional[int]:
        """LM head logits have shape (batch, seq_len, vocab_size)."""
        UpperCAmelCase = self._get_config_and_data()
        UpperCAmelCase = FlaxBlenderbotSmallForConditionalGeneration(_a )
        UpperCAmelCase = lm_model(input_ids=_a )
        UpperCAmelCase = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['logits'].shape , _a )

    def a_ ( self ) -> List[Any]:
        """LM head logits follow the *decoder* sequence length when encoder and
        decoder inputs have different lengths."""
        UpperCAmelCase = BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
        UpperCAmelCase = FlaxBlenderbotSmallForConditionalGeneration(_a )
        UpperCAmelCase = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
        UpperCAmelCase = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
        UpperCAmelCase = lm_model(input_ids=_a , decoder_input_ids=_a )
        UpperCAmelCase = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['logits'].shape , _a )

    def a_ ( self ) -> Optional[int]:
        """``shift_tokens_right`` keeps the shape, reduces pad count by one, and
        puts the decoder start token (2) in column 0."""
        UpperCAmelCase = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
        UpperCAmelCase = shift_tokens_right(_a , 1 , 2 )
        UpperCAmelCase = np.equal(_a , 1 ).astype(np.floataa ).sum()
        UpperCAmelCase = np.equal(_a , 1 ).astype(np.floataa ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(_a , n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class _UpperCAmelCase ( UpperCamelCase_ , unittest.TestCase , UpperCamelCase_ ):
    """Common-mixin model tests for Flax BlenderbotSmall, plus JIT'd
    encode/decode equivalence checks and a slow from_pretrained smoke test.

    NOTE(review): machine-mangled — the three class attributes share the name
    ``__SCREAMING_SNAKE_CASE`` (upstream: ``is_encoder_decoder``,
    ``all_model_classes``, ``all_generative_model_classes``), all test methods
    are named ``a_``, and ``_a`` is referenced without being defined.
    """

    __SCREAMING_SNAKE_CASE : Any = True
    __SCREAMING_SNAKE_CASE : Optional[int] = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    __SCREAMING_SNAKE_CASE : Union[str, Any] = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def a_ ( self ) -> str:
        """Instantiate the model tester."""
        UpperCAmelCase = FlaxBlenderbotSmallModelTester(self )

    def a_ ( self ) -> Any:
        """Cached decoding matches full decoding for every model class."""
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(_a , _a , _a )

    def a_ ( self ) -> Tuple:
        """Same check with an explicit decoder attention mask."""
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(_a , _a , _a )

    def a_ ( self ) -> Dict:
        """``model.encode`` produces identically-shaped outputs with and without JIT."""
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                UpperCAmelCase = self._prepare_for_class(_a , _a )
                UpperCAmelCase = model_class(_a )

                @jax.jit
                def encode_jitted(lowercase_ , lowercase_=None , **lowercase_ ):
                    return model.encode(input_ids=_a , attention_mask=_a )

                with self.subTest('JIT Enabled' ):
                    UpperCAmelCase = encode_jitted(**_a ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        UpperCAmelCase = encode_jitted(**_a ).to_tuple()
                self.assertEqual(len(_a ) , len(_a ) )
                for jitted_output, output in zip(_a , _a ):
                    self.assertEqual(jitted_output.shape , output.shape )

    def a_ ( self ) -> List[str]:
        """``model.decode`` produces identically-shaped outputs with and without JIT."""
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                UpperCAmelCase = model_class(_a )
                UpperCAmelCase = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
                UpperCAmelCase = {
                    """decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
                    """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
                    """encoder_outputs""": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(lowercase_ , lowercase_ , lowercase_ ):
                    return model.decode(
                        decoder_input_ids=_a , decoder_attention_mask=_a , encoder_outputs=_a , )

                with self.subTest('JIT Enabled' ):
                    UpperCAmelCase = decode_jitted(**_a ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        UpperCAmelCase = decode_jitted(**_a ).to_tuple()
                self.assertEqual(len(_a ) , len(_a ) )
                for jitted_output, output in zip(_a , _a ):
                    self.assertEqual(jitted_output.shape , output.shape )

    @slow
    def a_ ( self ) -> Any:
        """Smoke-test loading the pretrained 90M checkpoint for every model class."""
        for model_class_name in self.all_model_classes:
            UpperCAmelCase = model_class_name.from_pretrained('facebook/blenderbot_small-90M' )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            UpperCAmelCase = np.ones((1, 1) ) * model.config.eos_token_id
            UpperCAmelCase = model(_a )
            self.assertIsNotNone(_a )
| 712 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ):
__SCREAMING_SNAKE_CASE : str = (DDPMScheduler,)
def a_ ( self , **lowercase_ ) -> Union[str, Any]:
UpperCAmelCase = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**lowercase_ )
return config
def a_ ( self ) -> int:
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def a_ ( self ) -> Optional[int]:
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ )
def a_ ( self ) -> Dict:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_ )
def a_ ( self ) -> Union[str, Any]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowercase_ )
def a_ ( self ) -> Optional[Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowercase_ )
def a_ ( self ) -> Tuple:
self.check_over_configs(thresholding=lowercase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowercase_ , prediction_type=lowercase_ , sample_max_value=lowercase_ , )
def a_ ( self ) -> Any:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def a_ ( self ) -> Union[str, Any]:
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=lowercase_ )
def a_ ( self ) -> Optional[Any]:
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowercase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_0_9_7_9 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.0_2 ) ) < 1E-5
def a_ ( self ) -> str:
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowercase_ )
UpperCAmelCase = len(lowercase_ )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter
UpperCAmelCase = torch.manual_seed(0 )
for t in reversed(range(lowercase_ ) ):
# 1. predict noise residual
UpperCAmelCase = model(lowercase_ , lowercase_ )
# 2. predict previous mean of sample x_t-1
UpperCAmelCase = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCAmelCase = pred_prev_sample
UpperCAmelCase = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1E-3
def a_ ( self ) -> Union[str, Any]:
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(prediction_type='v_prediction' )
UpperCAmelCase = scheduler_class(**lowercase_ )
UpperCAmelCase = len(lowercase_ )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter
UpperCAmelCase = torch.manual_seed(0 )
for t in reversed(range(lowercase_ ) ):
# 1. predict noise residual
UpperCAmelCase = model(lowercase_ , lowercase_ )
# 2. predict previous mean of sample x_t-1
UpperCAmelCase = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCAmelCase = pred_prev_sample
UpperCAmelCase = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1E-3
def a_ ( self ) -> Optional[Any]:
    """Check ``previous_timestep`` over a custom (descending) timestep schedule.

    Fix: the loop iterated/indexed names (``timesteps``, ``prev_t``,
    ``expected_prev_t``) were read but never bound in the obfuscated version.
    """
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config)
    timesteps = [100, 87, 50, 1, 0]
    scheduler.set_timesteps(timesteps=timesteps)
    scheduler_timesteps = scheduler.timesteps
    for i, timestep in enumerate(scheduler_timesteps):
        if i == len(scheduler_timesteps) - 1:
            # the last step wraps around to -1 by convention
            expected_prev_t = -1
        else:
            expected_prev_t = timesteps[i + 1]
        prev_t = scheduler.previous_timestep(timestep)
        prev_t = prev_t.item()
        self.assertEqual(prev_t, expected_prev_t)
def a_ ( self ) -> Optional[Any]:
    """Non-descending custom timesteps must raise.

    Fix: ``assertRaises`` was given the undefined name ``lowercase_`` as the
    exception type; the scheduler raises ``ValueError`` for this input.
    """
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config)
    timesteps = [100, 87, 50, 51, 0]  # 50 -> 51 breaks the descending order
    with self.assertRaises(ValueError, msg='`custom_timesteps` must be in descending order.' ):
        scheduler.set_timesteps(timesteps=timesteps)
def a_ ( self ) -> List[Any]:
    """Passing both ``num_inference_steps`` and ``timesteps`` must raise.

    Fix: restored ``timesteps``/``num_inference_steps`` bindings and the
    ``ValueError`` exception type (the obfuscated version used the undefined
    name ``lowercase_`` for all three).
    """
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config)
    timesteps = [100, 87, 50, 1, 0]
    num_inference_steps = len(timesteps)
    with self.assertRaises(ValueError, msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
        scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
def a_ ( self ) -> str:
    """Custom timesteps starting at ``num_train_timesteps`` must raise.

    Fix: restored the ``timesteps`` binding and the ``ValueError`` exception
    type. The ``msg`` string is kept byte-for-byte (including its missing
    ``f`` prefix) to match upstream behavior.
    """
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config)
    timesteps = [scheduler.config.num_train_timesteps]
    with self.assertRaises(
        ValueError, msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
        scheduler.set_timesteps(timesteps=timesteps)
| 183 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class __SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
    """Output container for the text-to-video pipelines in this package.

    The single field holds the generated video frames, either as a list of
    numpy arrays or as a torch tensor (exact shape/dtype is not established
    by this file — presumably (frames, H, W, C); TODO confirm at call sites).
    """
    UpperCAmelCase__ : Union[List[np.ndarray], torch.FloatTensor]
# Import the real pipeline implementations only when both `transformers` and
# `torch` are installed; otherwise fall back to the auto-generated dummy
# objects so that `from ... import TextToVideoSDPipeline` still resolves and
# raises a helpful error at use time.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 672 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-module bootstrap for Swin Transformer V2.
#
# Fix: the obfuscated version bound both the structure dict and the modeling
# list to `__magic_name__` (each assignment clobbering the previous one) while
# `_LazyModule` was called with the undefined name `_import_structure`, and the
# `_LazyModule` instance was never installed into `sys.modules`.
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration objects.
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; module names follow the
    # `_import_structure` keys above.
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 672 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-module bootstrap for MobileNetV2.
#
# Fix: every collection was bound to the same name `_UpperCamelCase`
# (overwriting the structure dict), while `_LazyModule` read the undefined
# name `_import_structure`, and the lazy module was never installed into
# `sys.modules`.
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]

if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 708 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
# Configure root logging once at import time; every record carries the PID so
# multi-process (distributed) runs can be disentangled in a shared log.
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
# Fix: the functions below read the module-level name `logger`, but the
# obfuscated version bound the logger to `_UpperCamelCase` (NameError at the
# first `logger.info(...)`). Keep the old binding as an alias for safety.
logger = logging.getLogger(__name__)
_UpperCamelCase = logger
def __UpperCamelCase ( snake_case ) -> Dict:
    """Write the current git repo's id/sha/branch to ``<folder>/git_log.json``.

    Args:
        snake_case: destination folder for the ``git_log.json`` dump.

    Fix: the obfuscated version called ``json.dump(folder, folder, indent=4)``
    — dumping the folder path and using the path string as the file object —
    and passed the folder path as ``search_parent_directories``. Restored the
    metadata dict and the standard ``git.Repo(search_parent_directories=True)``
    discovery.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        '''repo_id''': str(repo),
        '''repo_sha''': str(repo.head.object.hexsha),
        '''repo_branch''': str(repo.active_branch),
    }
    with open(os.path.join(snake_case, '''git_log.json'''), '''w''') as f:
        json.dump(repo_infos, f, indent=4)
def __UpperCamelCase ( snake_case ) -> List[str]:
'''simple docstring'''
if params.n_gpu <= 0:
__A = 0
__A = -1
__A = True
__A = False
return
assert torch.cuda.is_available()
logger.info('''Initializing GPUs''' )
if params.n_gpu > 1:
assert params.local_rank != -1
__A = int(os.environ['''WORLD_SIZE'''] )
__A = int(os.environ['''N_GPU_NODE'''] )
__A = int(os.environ['''RANK'''] )
# number of nodes / node ID
__A = params.world_size // params.n_gpu_per_node
__A = params.global_rank // params.n_gpu_per_node
__A = True
assert params.n_nodes == int(os.environ['''N_NODES'''] )
assert params.node_id == int(os.environ['''NODE_RANK'''] )
# local job (single GPU)
else:
assert params.local_rank == -1
__A = 1
__A = 0
__A = 0
__A = 0
__A = 1
__A = 1
__A = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
__A = params.node_id == 0 and params.local_rank == 0
__A = params.n_nodes > 1
# summary
__A = F"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes )
logger.info(PREFIX + '''Node ID : %i''' % params.node_id )
logger.info(PREFIX + '''Local rank : %i''' % params.local_rank )
logger.info(PREFIX + '''World size : %i''' % params.world_size )
logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node )
logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) )
logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) )
logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) )
logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('''Initializing PyTorch distributed''' )
torch.distributed.init_process_group(
init_method='''env://''' , backend='''nccl''' , )
def __UpperCamelCase ( snake_case ) -> List[str]:
'''simple docstring'''
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 341 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__snake_case : Dict ={
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[int] =[
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[int] =[
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
__snake_case : Optional[Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 647 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
# Module-level tokenizer metadata.
#
# Fix: all five constants were bound to the single name `SCREAMING_SNAKE_CASE`
# (each assignment overwriting the previous one) while the tokenizer class
# below reads `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP`, etc.
# Restored the names the class actually references.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
    'tokenizer_file': {
        'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
    },
}

# Maximum input length supported by the positional embeddings of each checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mobilebert-uncased': 512}

# No checkpoint-specific init overrides.
PRETRAINED_INIT_CONFIGURATION = {}
class __UpperCAmelCase ( PreTrainedTokenizerFast ):
    """Fast MobileBERT tokenizer, backed by HuggingFace *tokenizers*.

    Fixes vs. the obfuscated version: the base class was the undefined name
    ``__A`` (PreTrainedTokenizerFast is imported above and is the canonical
    base); every ``__init__``/helper signature repeated the same parameter
    name (a SyntaxError); all class attributes shared one name; and
    ``build_inputs_with_special_tokens`` read ``output`` without binding it.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        # Re-create the backend normalizer if any of the casing options differ
        # from what the serialized tokenizer was built with.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """[CLS] A [SEP] (+ optionally B [SEP])."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """0s for the first segment (incl. specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """Persist the backend model files; returns the written file paths."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 99 | 0 |
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
# Optional-dependency gating for the Pix2Struct image processor.
#
# Fix: the torch-unavailable fallback, the module logger, and the default
# font repo were all bound to the single name `__lowerCamelCase`, so the
# fallback flag `is_torch_greater_or_equal_than_1_11` and `logger` were
# undefined when read elsewhere in this file.
if is_vision_available():
    import textwrap

    from PIL import Image, ImageDraw, ImageFont

if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

logger = logging.get_logger(__name__)

# HuggingFace Hub repo holding the default font used when rendering headers.
DEFAULT_FONT_PATH = 'ybelkada/fonts'
def _check_torch_version() -> None:
    """Raise if torch is installed but older than the required 1.11.

    Fix: this helper is called by name as ``_check_torch_version()`` both in
    ``torch_extract_patches`` and inside the image processor, but the
    obfuscated version defined it under a mangled name.
    """
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            F'You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '
            '''Pix2StructImageProcessor. Please upgrade torch.''' )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Extract non-overlapping (patch_height x patch_width) patches.

    Returns a tensor of shape
    (1, rows, columns, patch_height * patch_width * channels).

    Fixes: the obfuscated signature repeated one parameter name three times
    (a SyntaxError), and intermediate results were bound to ``_a`` while the
    code read ``patches``. Renamed to ``torch_extract_patches``, the name the
    image processor below calls.
    """
    requires_backends(torch_extract_patches, ['''torch'''] )
    _check_torch_version()
    # add a batch dim so unfold sees (N, C, H, W)
    image_tensor = image_tensor.unsqueeze(0 )
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width) , stride=(patch_height, patch_width) )
    patches = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , patch_height , patch_width , -1 )
    patches = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
        image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
    return patches.unsqueeze(0 )
def render_text(text, text_size=36, text_color="black", background_color="white", left_padding=5, right_padding=5, top_padding=5, bottom_padding=5, font_bytes=None, font_path=None):
    """Render *text* onto a new PIL image, wrapped to 80 characters per line.

    The font comes from ``font_bytes``, then ``font_path``, then the default
    Hub font repo. Fixes: the obfuscated signature repeated one parameter name
    ten times (a SyntaxError); this function is called by name as
    ``render_text`` from ``render_header``.
    """
    requires_backends(render_text, '''vision''' )
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80 )
    lines = wrapper.wrap(text=text )
    wrapped_text = '''\n'''.join(lines )
    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes )
    elif font_path is not None:
        font = font_path
    else:
        # 'ybelkada/fonts' is the default font repo on the Hub.
        font = hf_hub_download('ybelkada/fonts', '''Arial.TTF''' )
    font = ImageFont.truetype(font, encoding='''UTF-8''' , size=text_size )
    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new('''RGB''' , (1, 1) , background_color ) )
    _, _, text_width, text_height = temp_draw.textbbox((0, 0) , wrapped_text , font )
    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new('''RGB''' , (image_width, image_height) , background_color )
    draw = ImageDraw.Draw(image )
    draw.text(xy=(left_padding, top_padding) , text=wrapped_text , fill=text_color , font=font )
    return image
def render_header(image, header, **kwargs):
    """Render *header* text above *image*, both scaled to a common width.

    Returns a numpy array in the input's channel layout. Fixes: duplicate
    parameter names (SyntaxError) and intermediates bound to ``_a`` while the
    code composed them by real names. Called by name as ``render_header`` from
    the processor's ``preprocess``.
    """
    requires_backends(render_header, '''vision''' )
    # Convert to PIL image if necessary
    image = to_pil_image(image )
    header_image = render_text(header, **kwargs )
    new_width = max(header_image.width , image.width )
    new_height = int(image.height * (new_width / image.width) )
    new_header_height = int(header_image.height * (new_width / header_image.width) )
    new_image = Image.new('''RGB''' , (new_width, new_height + new_header_height) , '''white''' )
    new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
    new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image )
    if infer_channel_dimension_format(new_image ) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image , ChannelDimension.LAST )
    return new_image
class UpperCamelCase_ ( BaseImageProcessor ):
    """Pix2Struct image processor: renders an optional header, normalizes,
    and converts each image into a fixed-size sequence of flattened patches
    prefixed by (row, col) coordinates.

    Fixes vs. the obfuscated version: the base class was the undefined name
    ``UpperCamelCase`` (BaseImageProcessor is imported above); ``__init__``
    and ``preprocess`` repeated one parameter name (SyntaxError); the tuple
    unpacks for ``patch_height/patch_width``, ``image_height/image_width``
    and ``patches_shape`` were destroyed (NameError); ``np.uinta``,
    ``np.floataa`` and ``torch.floataa`` are not real attributes (uint8 /
    float32); and the methods are restored to the names the body itself calls
    (``self.normalize``, ``self.extract_flattened_patches``).
    """

    model_input_names = ['''flattened_patches''']

    def __init__( self , do_convert_rgb = True , do_normalize = True , patch_size = None , max_patches = 2_048 , is_vqa = False , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.patch_size = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa

    def extract_flattened_patches( self , image , max_patches , patch_size , **kwargs ) -> np.ndarray:
        """Resize so that ~max_patches patches fit, then flatten patches and
        prepend 1-based (row, col) ids; pad the result to max_patches rows."""
        requires_backends(self.extract_flattened_patches , '''torch''' )
        _check_torch_version()
        # convert to torch
        image = to_channel_dimension_format(image , ChannelDimension.FIRST )
        image = torch.from_numpy(image )
        patch_height, patch_width = patch_size['''height'''], patch_size['''width''']
        image_height, image_width = get_image_size(image )
        # maximize scale s.t. rows * cols <= max_patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height ) , max_patches ) , 1 )
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width ) , max_patches ) , 1 )
        resized_height = max(num_feasible_rows * patch_height , 1 )
        resized_width = max(num_feasible_cols * patch_width , 1 )
        image = torch.nn.functional.interpolate(
            image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode='''bilinear''' , align_corners=False , antialias=True , ).squeeze(0 )
        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image , patch_height , patch_width )
        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]
        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth] )
        # [rows * columns, 1]
        row_ids = torch.arange(rows ).reshape([rows, 1] ).repeat(1 , columns ).reshape([rows * columns, 1] )
        col_ids = torch.arange(columns ).reshape([1, columns] ).repeat(rows , 1 ).reshape([rows * columns, 1] )
        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1
        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32 )
        col_ids = col_ids.to(torch.float32 )
        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches] , -1 )
        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result , [0, 0, 0, max_patches - (rows * columns)] ).float()
        result = to_numpy_array(result )
        return result

    def normalize( self , image , data_format = None , **kwargs ) -> np.ndarray:
        """Per-image standardization (mean 0, std 1), with the std clamped by
        1/sqrt(numel) to avoid division blow-up on near-constant images."""
        if image.dtype == np.uint8:
            image = image.astype(np.float32 )
        # take mean across the whole `image`
        mean = np.mean(image )
        std = np.std(image )
        adjusted_stddev = max(std , 1.0 / math.sqrt(np.prod(image.shape ) ) )
        return normalize(image , mean=mean , std=adjusted_stddev , **kwargs )

    def preprocess( self , images , header_text = None , do_convert_rgb = None , do_normalize = None , max_patches = None , patch_size = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> ImageInput:
        """Full pipeline: RGB-convert, (VQA) render header, normalize, and
        extract flattened patches plus an attention mask over real patches."""
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa
        if kwargs.get('''data_format''' , None ) is not None:
            raise ValueError('''data_format is not an accepted input as the outputs are ''' )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if is_vqa:
            if header_text is None:
                raise ValueError('''A header text must be provided for VQA models.''' )
            font_bytes = kwargs.pop('''font_bytes''' , None )
            font_path = kwargs.pop('''font_path''' , None )
            if isinstance(header_text , str ):
                header_text = [header_text] * len(images )
            images = [
                render_header(image , header_text[i] , font_bytes=font_bytes , font_path=font_path )
                for i, image in enumerate(images )
            ]
        if do_normalize:
            images = [self.normalize(image=image ) for image in images]
        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image , max_patches=max_patches , patch_size=patch_size )
            for image in images
        ]
        # create attention mask in numpy (1 where the patch row is non-zero)
        attention_masks = [(image.sum(axis=-1 ) != 0).astype(np.float32 ) for image in images]
        encoded_outputs = BatchFeature(
            data={'''flattened_patches''': images, '''attention_mask''': attention_masks} , tensor_type=return_tensors )
        return encoded_outputs
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys from *state_dict* in place.

    Fix: the obfuscated version called ``state_dict.pop(state_dict,
    state_dict)`` — using the dict itself as the key — instead of popping
    each ignore key; renamed to ``remove_ignore_keys_``, the name the
    conversion routine below calls.
    """
    ignore_keys = [
        '''decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build a bias-free ``nn.Linear`` sharing *emb*'s weight (tied lm_head).

    Fix: the obfuscated version destroyed the shape unpack and passed the
    embedding module itself as every ``nn.Linear`` argument; renamed to
    ``make_linear_from_emb``, the name the conversion routine below calls.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # share the embedding weights with the projection layer
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq XGLM checkpoint and convert it to an XGLMForCausalLM.

    Fix: the obfuscated version bound ``args``/``state_dict``/``vocab_size``
    to the throwaway name ``_a`` while reading the real names, and dropped
    the ``model.lm_head`` assignment; renamed to
    ``convert_fairseq_xglm_checkpoint_from_disk``, the name the CLI below
    calls.
    """
    checkpoint = torch.load(checkpoint_path, map_location='''cpu''' )
    args = Namespace(**checkpoint['''cfg''']['''model'''] )
    state_dict = checkpoint['''model''']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['''decoder.embed_tokens.weight'''].shape[0]
    # fairseq uses "decoder.*" prefixes; HF XGLM expects "model.*"
    state_dict = {key.replace('''decoder''' , '''model''' ): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''gelu''' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict , strict=False )
    print(missing)
    # tie the output projection to the input embeddings
    model.lm_head = make_linear_from_emb(model.model.embed_tokens )
    return model
if __name__ == "__main__":
    # CLI: convert a fairseq XGLM `model.pt` into a HuggingFace checkpoint.
    __lowerCamelCase = argparse.ArgumentParser()
    # Required parameters
    __lowerCamelCase.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
    __lowerCamelCase.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    # NOTE(review): in the obfuscated source the parser, the parsed args and
    # the model were all bound to the same name; the calls below assume the
    # conventional parser/args/model flow — confirm against the upstream
    # conversion script.
    __lowerCamelCase = __lowerCamelCase.parse_args()
    __lowerCamelCase = convert_fairseq_xglm_checkpoint_from_disk(__lowerCamelCase.fairseq_path)
    __lowerCamelCase.save_pretrained(__lowerCamelCase.pytorch_dump_folder_path)
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowercase ( ProcessorMixin ):
    """Chinese-CLIP processor: wraps an image processor and a BERT tokenizer
    into one object so callers can pass text and/or images in a single call.

    Fixes vs. the obfuscated version: the base class was an undefined name
    (ProcessorMixin is imported above); ``__init__``/``__call__`` repeated
    one parameter name (SyntaxError); the merged text+image path assigned the
    pixel values to a throwaway name instead of ``encoding["pixel_values"]``;
    and the class attributes were all bound to the same name.
    """

    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """ChineseCLIPImageProcessor"""
    tokenizer_class = ("""BertTokenizer""", """BertTokenizerFast""")

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        """Tokenize *text* and/or process *images*; when both are given the
        pixel values are merged into the text encoding."""
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class( self ):
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , FutureWarning , )
        return self.image_processor_class
| 65 |
'''simple docstring'''
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
__lowerCAmelCase = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
}
class SCREAMING_SNAKE_CASE:
    """Diffie-Hellman key exchange over a MODP group from the module-level `primes` table.

    Public keys are exchanged as hex strings (no ``0x`` prefix); the shared
    secret is returned as the SHA-256 hex digest of the computed value.

    Fix notes: the original methods were all given the same generated name
    (each definition shadowed the previous one) and their bodies referenced
    names that no longer existed (``group``, ``key``, ``prime``, ...), and
    ``is_valid_public_key_static`` was called but never defined. Canonical
    method names are restored so the class is actually usable.
    """

    def __init__(self, group: int = 14) -> None:
        """Pick the MODP group and draw a fresh 256-bit private key.

        Raises:
            ValueError: if `group` is not a key of the `primes` table.
        """
        if group not in primes:
            raise ValueError('Unsupported Group')
        self.prime = primes[group]['prime']
        self.generator = primes[group]['generator']
        # 32 random bytes -> 256-bit private exponent
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        """Return the private key as a hex string without the '0x' prefix."""
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        """Return g^private mod p as a hex string without the '0x' prefix."""
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        """Derive the shared secret from the peer's hex-encoded public key.

        Raises:
            ValueError: if the peer's key fails NIST SP800-56 validation.
        """
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError('Invalid public key')
        shared_key = pow(other_key, self.__private_key, self.prime)
        return shaaaa(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        """Stateless variant of generate_shared_key: both keys given as hex strings."""
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]['prime']
        if not SCREAMING_SNAKE_CASE.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError('Invalid public key')
        shared_key = pow(remote_public_key, local_private_key, prime)
        return shaaaa(str(shared_key).encode()).hexdigest()
# Run any doctests in this module when it is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 466 | 0 |
import argparse
import logging
import pickle
from collections import Counter


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
# Fix: these module-level values were bound to throwaway names while the code
# below referred to `logger`, `parser`, `args`, `data`, `counter` and `counts`,
# so every one of them raised NameError. Proper names restored.
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    # Accumulate per-token occurrence counts over all sequences.
    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    # Dense count vector indexed by token id (ids absent from the data stay 0).
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
class UpperCamelCase:
    """Prefix-sum index over a list of numbers for O(1) range-sum queries.

    Fix notes: the original two query methods shared one generated name
    (the second shadowed the first), used duplicate parameter names (a
    SyntaxError), and referenced `array`/`start`/`end`/`target_sum` that were
    never bound. Canonical method names restored.
    """

    def __init__(self, array) -> None:
        """Build prefix sums: prefix_sum[i] == sum(array[0..i])."""
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
            for i in range(1, len_array):
                self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start, end) -> int:
        """Return the sum of array[start..end], both endpoints inclusive."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum) -> bool:
        """Return True if some contiguous subarray sums exactly to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            # sum_item - earlier_prefix == target_sum for some earlier prefix
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 138 | 1 |
import re
def __magic_name__(dna: str) -> str:
    """Return the complementary DNA strand (A<->T, C<->G).

    Fix: the body used `dna` but the parameter had been renamed to a generated
    placeholder, so every call raised NameError; parameter name restored.

    Raises:
        ValueError: if `dna` contains any character other than A, T, C, G.
    """
    if len(re.findall('[ATCG]', dna)) != len(dna):
        raise ValueError('Invalid Strand')
    return dna.translate(dna.maketrans('ATCG', 'TAGC'))


if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 66 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
# Module-level logger.
# NOTE(review): the generated name below is never referenced in the visible
# code; the upstream module binds this to `logger`.
SCREAMING_SNAKE_CASE: Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class lowercase_(Pipeline):
    """Zero-shot image classification pipeline: scores an image against a set of
    free-form candidate labels using a CLIP-style dual encoder.

    Fix notes: the decorator argument and base class referenced an undefined
    generated name (restored to the imported `PIPELINE_INIT_ARGS` / `Pipeline`),
    the four pipeline-API hooks all shared one generated method name (so the
    `Pipeline` machinery could not find them), and the sort key lambda referenced
    an undefined `x`. Canonical hook names restored.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, 'vision')
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == 'tf'
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        """Classify the image(s) against `candidate_labels` passed via kwargs."""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # Route user kwargs to the preprocess step; forward/postprocess take none.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs['candidate_labels'] = candidate_labels
        # One hypothesis sentence per candidate label.
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs['text_inputs'] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop('candidate_labels')
        text_inputs = model_inputs.pop('text_inputs')
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop('candidate_labels')
        logits = model_outputs['logits'][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f'Unsupported framework: {self.framework}')

        # Highest-scoring label first.
        result = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
"""simple docstring"""
def binomial_coefficient(n: int, k: int) -> int:
    """Return C(n, k), the number of ways to choose k items from n."""
    result = 1  # holds the running product
    # Since C(n, k) = C(n, n-k), iterate over the smaller of the two.
    if k > (n - k):
        k = n - k
    # Calculate C(n, k) multiplicatively; division is always exact here.
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """Return the n-th Catalan number: the number of binary search trees
    (and binary tree shapes) on `node_count` nodes."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    """Return n!.

    Raises:
        ValueError: if n is negative.
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Return the number of labeled binary trees on `node_count` nodes
    (Catalan(n) shapes times n! labelings)."""
    return catalan_number(node_count) * factorial(node_count)


# Fix notes: all four functions above originally shared one generated name
# (each shadowed the previous) with duplicate parameter names (a SyntaxError),
# and this entry point read an unbound `node_count`. Canonical names restored.
if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
| 117 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger.
# NOTE(review): this binding is immediately shadowed by the archive map below,
# because both statements reuse the same generated name `__snake_case`.
__snake_case = logging.get_logger(__name__)
# Map of pretrained BigBird checkpoint names to their hosted config files.
__snake_case = {
    """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
    """google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
    """google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class _lowerCAmelCase(PretrainedConfig):
    """Configuration for BigBird models (model_type "big_bird").

    Fix notes: the base class referenced an undefined generated name (restored
    to the imported `PretrainedConfig`), the `model_type` class attribute had
    lost its name, and `__init__` used duplicate generated parameter names (a
    SyntaxError) with all `self.X = X` targets stripped. Restored from the
    values and assignment order visible in the original body.
    """

    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
# NOTE(review): this class reuses the generated name of the config class above
# and therefore shadows it at module level; the upstream module gives the two
# classes distinct names.
class _lowerCAmelCase(OnnxConfig):
    """ONNX export configuration for BigBird.

    Fix notes: the base class referenced an undefined generated name (restored
    to the imported `OnnxConfig`) and the property had lost its required name
    `inputs`, which the ONNX export machinery looks up.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 117 | 1 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
# Fix: this was bound to a throwaway generated name while the conversion code
# below calls `logger.info(...)` — bind the module logger under its real name.
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
# Fix: the list was bound to a throwaway generated name while every append below
# targets `rename_keys`, so the first append raised NameError.
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
    # q, k, v projections in self/cross-attention in decoder for conditional DETR
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
    )

    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
    )

# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
        ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
        ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
        ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
        ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
        ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
        ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
        ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
        ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
        ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
    ]
)
def rename_key(state_dict, old, new):
    """Move state_dict[old] to state_dict[new] in place.

    Fix: restored the name the conversion loop calls (`rename_key`) and the
    lost pop/assignment targets (the original had duplicate generated
    parameter names, a SyntaxError).
    """
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Return a copy of `state_dict` with timm backbone keys remapped to the
    HuggingFace conv-encoder naming (`backbone.0.body` -> `backbone.conv_encoder.model`).

    Fix: restored the name the conversion code calls and the lost assignment
    targets (`new_key`, `new_state_dict[...]`).
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('backbone.0.body', 'backbone.conv_encoder.model')
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split each encoder layer's fused in-projection into separate q/k/v
    projection entries, in place.

    Assumes the fused weight is (768, d) and bias is (768,), i.e. three stacked
    256-dim projections (q, k, v) — matches the 256-dim Conditional-DETR model.

    Fix: restored the name the conversion code calls and the lost assignment
    targets for the popped tensors and the new q/k/v keys.
    """
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    """Download and return the standard COCO cats test image (PIL Image).

    Fix: restored the name the conversion code calls (`prepare_img`) and the
    lost `url`/`im` bindings; `stream=True` so PIL can read the raw response.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original Conditional-DETR weights into the HuggingFace
    ConditionalDetr structure, verify the outputs match, then push and save.

    Fix notes: the original had duplicate generated parameter names (a
    SyntaxError), every local binding stripped to a throwaway name, and called
    helpers under names that no longer existed. Locals and call targets
    restored from the values used downstream.
    """
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    img_format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=img_format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors='pt')
    pixel_values = encoding["pixel_values"]

    logger.info(f'Converting model {model_name}...')

    # load original model from torch hub
    conditional_detr = torch.hub.load('DeppMeng/ConditionalDETR', model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith('conditional_detr')
                and not key.startswith('class_labels_classifier')
                and not key.startswith('bbox_predictor')
            ):
                val = state_dict.pop(key)
                # NOTE(review): remap "conditional_detr.X" -> "conditional_detr.model.X";
                # the exact target key was lost in this copy — confirm against the
                # upstream conversion script before running a panoptic conversion.
                state_dict["conditional_detr.model" + key[len("conditional_detr"):]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith('bbox_attention') or key.startswith('mask_head'):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith('class_labels_classifier') and not key.startswith('bbox_predictor'):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization='DepuMeng', commit_message='Add model')
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs['pred_logits'], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs['pred_boxes'], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs['pred_masks'], atol=1e-4)

    # Save model and image processor
    logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Fix: the parser and parsed args were bound to a throwaway generated name
    # while being used as `parser`/`args`, and the conversion entry point was
    # called under a name that did not exist.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 510 |
import math
class lowerCamelCase_:
    """All-pairs shortest paths on a directed weighted graph via Floyd-Warshall.

    Fix notes: the three public methods originally shared one generated name
    (each shadowed the previous), the weight/distance matrices and the edge
    and relaxation assignments had lost their targets, and the demo below
    referenced an undefined instance. Canonical method names restored; the
    distance of every node to itself is initialised to 0 (required for
    correct shortest-path results).
    """

    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        # adjacency matrix for weight
        self.w = [[math.inf for j in range(0, n)] for i in range(0, n)]
        # dp[i][j] stores minimum distance from i to j
        self.dp = [[math.inf for j in range(0, n)] for i in range(0, n)]
        for i in range(0, n):
            self.dp[i][i] = 0

    def add_edge(self, u, v, w):
        """Add a directed edge u -> v with weight w."""
        self.dp[u][v] = w

    def floyd_warshall(self):
        """Relax all pairs through every intermediate node k (O(n^3))."""
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        """Return the minimum distance from u to v computed by floyd_warshall."""
        return self.dp[u][v]


if __name__ == "__main__":
    graph = lowerCamelCase_(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
| 17 | 0 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger.
# NOTE(review): this binding is immediately shadowed by the archive map below,
# because both statements reuse the same generated name `lowercase__`.
lowercase__ :Tuple = logging.get_logger(__name__)
# Map of pretrained checkpoint names to their hosted config files.
lowercase__ :Optional[Any] = {
    'microsoft/conditional-detr-resnet-50': (
        'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
    ),
}
class snake_case(PretrainedConfig):
    """Configuration for Conditional DETR (model_type "conditional_detr").

    Fix notes: the base class referenced an undefined generated name (restored
    to the imported `PretrainedConfig`), the class attributes `model_type`,
    `keys_to_ignore_at_inference` and `attribute_map` had lost their names,
    and `__init__` used duplicate generated parameter names (a SyntaxError)
    with every `self.X = X` target stripped. Restored from the values and
    assignment order visible in the original body.
    """

    model_type = 'conditional_detr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
# NOTE(review): this class reuses the generated name of the config class above
# and therefore shadows it at module level; the upstream module gives the two
# classes distinct names.
class snake_case(OnnxConfig):
    """ONNX export configuration for Conditional DETR.

    Fix notes: the base class referenced an undefined generated name (restored
    to the imported `OnnxConfig`) and the three properties had lost the names
    the export machinery looks up (`inputs`, `atol_for_validation`,
    `default_onnx_opset`).
    """

    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('pixel_mask', {0: 'batch'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
"""simple docstring"""
def lowerCamelCase_ ( ) ->str:
"""simple docstring"""
for n in range(1 , 1_00_00_00 ):
yield n * (n + 1) // 2
def lowerCamelCase_ ( UpperCAmelCase_ ) ->Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase : Dict = 1
__UpperCAmelCase : Any = 2
while i * i <= n:
__UpperCAmelCase : Tuple = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def lowerCamelCase_ ( ) ->Optional[int]:
"""simple docstring"""
return next(i for i in triangle_number_generator() if count_divisors(UpperCAmelCase_ ) > 5_00 )
if __name__ == "__main__":
print(solution()) | 374 | 0 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class A(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for ProphetNet (WordPiece-based vocabulary).

    Bug fixes: every method was named `__a`, so only the last definition
    survived and unittest discovered no tests; the base class referenced the
    undefined name `__UpperCAmelCase` (the import shows `TokenizerTesterMixin`);
    several bodies referenced the undefined name `lowerCamelCase_` where a
    boolean flag or a local variable belonged — the intended values are
    recovered from the expected outputs of each assertion.
    """

    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Minimal vocabulary exercising WordPiece continuation ("##") pieces.
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    @require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]

        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # ProphetNet appends a single [SEP] (id 102) after each sequence.
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    """Builds tiny RoFormer configs/inputs and checks every TF model head.

    Bug fixes: the class was named `_lowercase` while `setUp` in the test
    class instantiates `TFRoFormerModelTester`; `__init__` declared the same
    parameter name repeatedly (a SyntaxError); and every helper method shared
    one name, so `create_and_check_*` calls resolved to nothing.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        # NOTE(review): mirrors the original — the constructor accepts
        # arguments but pins its own hyper-parameters below.
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        """Create a small config plus random input tensors/labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerModel(config=config)

        # Exercise both the dict and the positional-list calling conventions.
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        # Tile each tensor to (batch, num_choices, seq) as the head expects.
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-framework model tests for TF RoFormer.

    Bug fixes: all class attributes shared the name `UpperCAmelCase_` and all
    methods the name `lowerCAmelCase__`, so earlier definitions were shadowed
    and unittest discovered no tests; the mixin bases referenced the undefined
    name `lowerCAmelCase` (the imports show TFModelTesterMixin and
    PipelineTesterMixin).
    """

    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )

    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # Text-generation pipeline tests are known to fail for this model.
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    """Slow integration check against the published Chinese RoFormer weights.

    Renamed from a duplicate `_lowercase` class name (module-level shadowing)
    and the test method from `lowerCAmelCase__` so unittest can discover it.
    """

    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    """Checks the sinusoidal positional-embedding layer values.

    Bug fix: the tolerance attribute was named `UpperCAmelCase_` while the
    test bodies read `self.tolerance` (AttributeError); both test methods
    also shared one name, so only the second one ran.
    """

    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)

        emb = emba(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )

        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emba([2, 16, 512])
        weights = emba.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    """Checks rotary position embedding application on query/key tensors.

    Bug fixes: the tolerance attribute was named `UpperCAmelCase_` while the
    body reads `self.tolerance`; the rotary call passed the undefined name
    `lowerCamelCase_` three times instead of the prepared tensors; and
    `tf.floataa` is a mangled `tf.float32` — TODO confirm dtype against the
    upstream test.
    """

    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2 batches, 12 heads, 16 positions, head dim 64 — small but non-trivial.
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        desired_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        desired_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
def gnome_sort(lst) -> list:
    """Sort `lst` in place with gnome sort and return it.

    Bug fixes: the function was unnamed-able (`lowerCAmelCase__`) yet called
    as `gnome_sort` below; the body referenced undefined names `lst`/`i`;
    and the swap assigned both elements to a throwaway local, making it a
    no-op. O(n^2) worst case — fine for the interactive use below.
    """
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            # Out of order: swap the pair and step back, like a gnome
            # re-checking the previous flower pot.
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))


from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCAmelCase__ ( a__ ) ->str:
'''simple docstring'''
return getitem, k
def lowerCAmelCase__ ( a__ , a__ ) ->Tuple:
'''simple docstring'''
return setitem, k, v
def lowerCAmelCase__ ( a__ ) ->int:
'''simple docstring'''
return delitem, k
def lowerCAmelCase__ ( a__ , a__ , *a__ ) ->List[str]:
'''simple docstring'''
try:
return fun(a__ , *a__ ), None
except Exception as e:
return None, e
lowerCamelCase__ = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
lowerCamelCase__ = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
lowerCamelCase__ = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
lowerCamelCase__ = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
lowerCamelCase__ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
lowerCamelCase__ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def lowerCAmelCase__ ( a__ ) ->Dict:
'''simple docstring'''
_UpperCamelCase = HashMap(initial_block_size=4 )
_UpperCamelCase = {}
for _, (fun, *args) in enumerate(a__ ):
_UpperCamelCase , _UpperCamelCase = _run_operation(a__ , a__ , *a__ )
_UpperCamelCase , _UpperCamelCase = _run_operation(a__ , a__ , *a__ )
assert my_res == py_res
assert str(a__ ) == str(a__ )
assert set(a__ ) == set(a__ )
assert len(a__ ) == len(a__ )
assert set(my.items() ) == set(py.items() )
def lowerCAmelCase__ ( ) ->List[Any]:
'''simple docstring'''
def is_public(a__ ) -> bool:
return not name.startswith("_" )
_UpperCamelCase = {name for name in dir({} ) if is_public(a__ )}
_UpperCamelCase = {name for name in dir(HashMap() ) if is_public(a__ )}
assert dict_public_names > hash_public_names
| 82 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowercase : str = logging.get_logger(__name__)
# SentencePiece uses "▁" (U+2581) as the word-boundary marker.
SPIECE_UNDERLINE = "▁"

# Bug fix: these constants were all bound to the single name `lowercase`
# (each assignment shadowing the previous one), while the tokenizer class
# below reads VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES — restored those names.
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
lowercase : Union[str, Any] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", 
"""mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class a__ ( __SCREAMING_SNAKE_CASE ):
    """SentencePiece-based NLLB-style tokenizer.

    Builds its vocabulary from a SentencePiece model plus the language codes
    above, mimicking the original fairseq token-to-id alignment for the first
    four special tokens. Encoded sequences get language-code / EOS special
    tokens prepended or appended depending on ``legacy_behaviour``.

    NOTE(review): identifiers in this block look machine-mangled — every class
    attribute is `_A`, every parameter is `A_` (duplicate parameter names are
    not valid Python) and assignment targets are `lowerCamelCase_`, while the
    bodies read real attribute names (`self.sp_model`, `self.lang_code_to_id`,
    ...). Restore the original names before running this code.
    """

    _A = VOCAB_FILES_NAMES
    _A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _A = PRETRAINED_VOCAB_FILES_MAP
    # Inputs the model's forward pass expects.
    _A = ["input_ids", "attention_mask"]
    # Token-id lists added around every encoded sequence (set per language by
    # set_src_lang_special_tokens / set_tgt_lang_special_tokens below).
    _A = []
    _A = []

    def __init__( self : str , A_ : Any , A_ : int="<s>" , A_ : int="</s>" , A_ : Dict="</s>" , A_ : List[str]="<s>" , A_ : List[str]="<unk>" , A_ : Optional[int]="<pad>" , A_ : Union[str, Any]="<mask>" , A_ : str=None , A_ : Union[str, Any]=None , A_ : Dict=None , A_ : Optional[Dict[str, Any]] = None , A_ : List[str]=None , A_ : List[str]=False , **A_ : Union[str, Any] , ) -> int:
        """Load the SentencePiece model and build the fairseq-aligned vocab.

        The apparent parameters (mangled here) are: vocab_file, bos/eos/sep/
        cls/unk/pad/mask tokens, tokenizer_file, src_lang, tgt_lang,
        sp_model_kwargs, additional_special_tokens and legacy_behaviour.
        """
        # Mask token behaves like a normal word, i.e. includes the space before it.
        lowerCamelCase_: str = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
        lowerCamelCase_: Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
        lowerCamelCase_: Optional[Any] = legacy_behaviour
        super().__init__(
            bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , tokenizer_file=A_ , src_lang=A_ , tgt_lang=A_ , additional_special_tokens=A_ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=A_ , **A_ , )
        lowerCamelCase_: int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(A_ ) )
        lowerCamelCase_: Optional[int] = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |   7  |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic fairseq token-to-id alignment for the first 4 tokens.
        lowerCamelCase_: List[str] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        lowerCamelCase_: Any = 1
        lowerCamelCase_: List[Any] = len(self.sp_model )
        # Language codes are appended AFTER the spm vocabulary (order-sensitive).
        lowerCamelCase_: str = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(A_ )
        }
        lowerCamelCase_: List[str] = {v: k for k, v in self.lang_code_to_id.items()}
        # <mask> is placed after all spm tokens and all language codes.
        lowerCamelCase_: str = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        lowerCamelCase_: Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        lowerCamelCase_: Any = list(self.lang_code_to_id.keys() )
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens] )
        lowerCamelCase_: Union[str, Any] = src_lang if src_lang is not None else """eng_Latn"""
        lowerCamelCase_: Optional[int] = self.lang_code_to_id[self._src_lang]
        lowerCamelCase_: List[Any] = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )

    def __getstate__( self : Optional[Any] ) -> Optional[Any]:
        """Drop the unpicklable SentencePiece handle; keep its serialized proto."""
        lowerCamelCase_: Optional[Any] = self.__dict__.copy()
        lowerCamelCase_: Optional[Any] = None
        lowerCamelCase_: Optional[int] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__( self : str , A_ : Dict ) -> Optional[int]:
        """Rebuild the SentencePiece processor from the serialized proto."""
        lowerCamelCase_: Optional[int] = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            lowerCamelCase_: Dict = {}
        lowerCamelCase_: Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )

    @property
    def lowerCAmelCase ( self : Tuple ) -> Any:
        """Total vocabulary size: spm tokens + language codes + offset + mask."""
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def lowerCAmelCase ( self : Optional[int] ) -> str:
        """Current source-language code (e.g. ``eng_Latn``)."""
        return self._src_lang

    @src_lang.setter
    def lowerCAmelCase ( self : str , A_ : str ) -> None:
        """Change the source language and refresh the special tokens."""
        lowerCamelCase_: Dict = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )

    def lowerCAmelCase ( self : Tuple , A_ : List[int] , A_ : Optional[List[int]] = None , A_ : bool = False ) -> List[int]:
        """Return a 0/1 mask marking special tokens (1) vs sequence tokens (0)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
        lowerCamelCase_: Optional[int] = [1] * len(self.prefix_tokens )
        lowerCamelCase_: str = [1] * len(self.suffix_tokens )
        if token_ids_a is None:
            return prefix_ones + ([0] * len(A_ )) + suffix_ones
        return prefix_ones + ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones

    def lowerCAmelCase ( self : List[Any] , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]:
        """Wrap token ids with the current prefix/suffix special tokens."""
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens

    def lowerCAmelCase ( self : List[str] , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]:
        """Return all-zero token type ids (this model does not use them)."""
        lowerCamelCase_: Union[str, Any] = [self.sep_token_id]
        lowerCamelCase_: Dict = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def lowerCAmelCase ( self : Union[str, Any] , A_ : Any , A_ : str , A_ : Optional[str] , A_ : Optional[str] , **A_ : str ) -> Optional[int]:
        """Encode raw inputs for translation, attaching the target-language id."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
        lowerCamelCase_: Dict = src_lang
        lowerCamelCase_: Tuple = self(A_ , add_special_tokens=A_ , return_tensors=A_ , **A_ )
        lowerCamelCase_: Optional[Any] = self.convert_tokens_to_ids(A_ )
        lowerCamelCase_: Tuple = tgt_lang_id
        return inputs

    def lowerCAmelCase ( self : Any ) -> List[str]:
        """Return the full token -> id vocabulary, including added tokens."""
        lowerCamelCase_: str = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def lowerCAmelCase ( self : Optional[int] , A_ : str ) -> List[str]:
        """Tokenize text with the SentencePiece model."""
        return self.sp_model.encode(A_ , out_type=A_ )

    def lowerCAmelCase ( self : Optional[int] , A_ : Optional[Any] ) -> Any:
        """Convert a token to its id, honoring the fairseq alignment table."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        lowerCamelCase_: Dict = self.sp_model.PieceToId(A_ )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def lowerCAmelCase ( self : Optional[Any] , A_ : List[str] ) -> List[Any]:
        """Convert an id back to its token, honoring the fairseq alignment table."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def lowerCAmelCase ( self : Dict , A_ : int ) -> str:
        """Join tokens into a string, replacing the SP underline with a space."""
        lowerCamelCase_: Dict = """""".join(A_ ).replace(A_ , """ """ ).strip()
        return out_string

    def lowerCAmelCase ( self : str , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]:
        """Save the SentencePiece vocabulary file into *save_directory*."""
        if not os.path.isdir(A_ ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCamelCase_: Optional[Any] = os.path.join(
            A_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        # Copy the original file when available; otherwise dump the serialized proto.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , A_ )
        elif not os.path.isfile(self.vocab_file ):
            with open(A_ , """wb""" ) as fi:
                lowerCamelCase_: List[Any] = self.sp_model.serialized_model_proto()
                fi.write(A_ )
        return (out_vocab_file,)

    def lowerCAmelCase ( self : Union[str, Any] , A_ : List[str] , A_ : str = "eng_Latn" , A_ : Optional[List[str]] = None , A_ : str = "fra_Latn" , **A_ : List[str] , ) -> BatchEncoding:
        """Prepare a seq2seq batch, recording the src/tgt language codes."""
        lowerCamelCase_: Union[str, Any] = src_lang
        lowerCamelCase_: Dict = tgt_lang
        return super().prepare_seqaseq_batch(A_ , A_ , **A_ )

    def lowerCAmelCase ( self : Optional[Any] ) -> str:
        """Switch special tokens to the source-language layout."""
        return self.set_src_lang_special_tokens(self.src_lang )

    def lowerCAmelCase ( self : List[str] ) -> int:
        """Switch special tokens to the target-language layout."""
        return self.set_tgt_lang_special_tokens(self.tgt_lang )

    def lowerCAmelCase ( self : List[str] , A_ : Any ) -> None:
        """Set prefix/suffix tokens for *src_lang*.

        Legacy: no prefix, suffix = [eos, src_lang_code].
        Default: prefix = [src_lang_code], suffix = [eos].
        """
        lowerCamelCase_: List[str] = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            lowerCamelCase_: Any = []
            lowerCamelCase_: Dict = [self.eos_token_id, self.cur_lang_code]
        else:
            lowerCamelCase_: Tuple = [self.cur_lang_code]
            lowerCamelCase_: int = [self.eos_token_id]

    def lowerCAmelCase ( self : Dict , A_ : str ) -> None:
        """Set prefix/suffix tokens for the target language (same layout rules)."""
        lowerCamelCase_: str = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            lowerCamelCase_: Optional[Any] = []
            lowerCamelCase_: Tuple = [self.eos_token_id, self.cur_lang_code]
        else:
            lowerCamelCase_: Dict = [self.cur_lang_code]
            lowerCamelCase_: Union[str, Any] = [self.eos_token_id]
| 423 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : List[Any] = logging.get_logger(__name__)

# Maps checkpoint names to their hosted config files (ViT-MAE).
lowercase : Tuple = {
    """facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class a__ ( __SCREAMING_SNAKE_CASE ):
    """Configuration for a ViT-MAE style model.

    Fix over the original: every ``__init__`` parameter was named ``A_``
    (duplicate parameter names — a SyntaxError) while the body read undefined
    names (``hidden_size`` etc.). Parameter names are restored to match the
    attributes the body assigns; defaults are unchanged.
    """

    _A = "vit_mae"

    def __init__(
        self,
        hidden_size=768,                       # encoder embedding dimension
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,                       # fraction of patches masked during pre-training
        norm_pix_loss=False,                   # whether to normalize target pixels in the loss
        **kwargs,
    ):
        """Store the configuration; unknown kwargs go to the base config."""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 423 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
A_ = logging.get_logger(__name__)
class UpperCAmelCase ( UpperCAmelCase__ ):
    """MobileViT-style image processor.

    Pipeline: resize (shortest edge) -> center crop -> rescale -> flip channel
    order (pretrained checkpoints expect BGR). Also exposes a semantic
    segmentation post-processing helper.

    NOTE(review): parameters here are machine-mangled (every parameter is
    `SCREAMING_SNAKE_CASE_`, assignment targets are `lowerCamelCase_`) while
    bodies read real names (`size`, `do_resize`, ...). Restore before running.
    """

    # Name of the tensor this processor produces.
    SCREAMING_SNAKE_CASE_ = ['pixel_values']

    def __init__( self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 1 / 255 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = True , **SCREAMING_SNAKE_CASE_ , ) -> None:
        """Store defaults: do_resize, size, resample, do_rescale,
        rescale_factor, do_center_crop, crop_size, do_flip_channel_order."""
        super().__init__(**SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = size if size is not None else {'shortest_edge': 224}
        lowerCamelCase_ = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = crop_size if crop_size is not None else {'height': 256, 'width': 256}
        lowerCamelCase_ = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='crop_size' )
        lowerCamelCase_ = do_resize
        lowerCamelCase_ = size
        lowerCamelCase_ = resample
        lowerCamelCase_ = do_rescale
        lowerCamelCase_ = rescale_factor
        lowerCamelCase_ = do_center_crop
        lowerCamelCase_ = crop_size
        lowerCamelCase_ = do_flip_channel_order

    def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = PIL.Image.BILINEAR , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> np.ndarray:
        """Resize an image so its shortest edge matches ``size['shortest_edge']``."""
        lowerCamelCase_ = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''' )
        lowerCamelCase_ = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=size['shortest_edge'] , default_to_square=SCREAMING_SNAKE_CASE_ )
        return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )

    def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> np.ndarray:
        """Center-crop an image to ``(size['height'], size['width'])``."""
        lowerCamelCase_ = get_size_dict(SCREAMING_SNAKE_CASE_ )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
        return center_crop(SCREAMING_SNAKE_CASE_ , size=(size['height'], size['width']) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )

    def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> str:
        """Rescale pixel values by ``scale`` (typically 1/255)."""
        return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )

    def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> np.ndarray:
        """Swap channel order (RGB <-> BGR)."""
        return flip_channel_order(SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ )

    def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , ) -> PIL.Image.Image:
        """Run the full preprocessing pipeline over one image or a batch.

        Per-call arguments override the defaults stored in ``__init__``.
        Returns a BatchFeature with key ``pixel_values``.
        """
        lowerCamelCase_ = do_resize if do_resize is not None else self.do_resize
        lowerCamelCase_ = resample if resample is not None else self.resample
        lowerCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale
        lowerCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
        lowerCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
        lowerCamelCase_ = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        lowerCamelCase_ = size if size is not None else self.size
        lowerCamelCase_ = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = crop_size if crop_size is not None else self.crop_size
        lowerCamelCase_ = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='crop_size' )
        lowerCamelCase_ = make_list_of_images(SCREAMING_SNAKE_CASE_ )
        if not valid_images(SCREAMING_SNAKE_CASE_ ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        # All transformations expect numpy arrays.
        lowerCamelCase_ = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
        if do_resize:
            lowerCamelCase_ = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
        if do_center_crop:
            lowerCamelCase_ = [self.center_crop(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images]
        if do_rescale:
            lowerCamelCase_ = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            lowerCamelCase_ = [self.flip_channel_order(image=SCREAMING_SNAKE_CASE_ ) for image in images]
        lowerCamelCase_ = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
        lowerCamelCase_ = {'pixel_values': images}
        return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )

    def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[str]:
        """Turn model logits into per-image semantic segmentation maps.

        If ``target_sizes`` is given, logits are bilinearly resized to each
        target size before the per-pixel argmax.
        """
        lowerCamelCase_ = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
            if is_torch_tensor(SCREAMING_SNAKE_CASE_ ):
                lowerCamelCase_ = target_sizes.numpy()
            lowerCamelCase_ = []
            for idx in range(len(SCREAMING_SNAKE_CASE_ ) ):
                lowerCamelCase_ = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=SCREAMING_SNAKE_CASE_ )
                lowerCamelCase_ = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(SCREAMING_SNAKE_CASE_ )
        else:
            lowerCamelCase_ = logits.argmax(dim=1 )
            lowerCamelCase_ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 384 |
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
# Path constants used by the functions below. NOTE(review): both are assigned
# to the same mangled name `A_`, so the second assignment clobbers the first —
# these were almost certainly two distinct constants (the transformers source
# path and the task-guides docs path).
A_ = "src/transformers"
A_ = "docs/source/en/tasks"
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[Any]:
with open(__UpperCamelCase ,'r' ,encoding='utf-8' ,newline='\n' ) as f:
lowerCamelCase_ = f.readlines()
# Find the start prompt.
lowerCamelCase_ = 0
while not lines[start_index].startswith(__UpperCamelCase ):
start_index += 1
start_index += 1
lowerCamelCase_ = start_index
while not lines[end_index].startswith(__UpperCamelCase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
# NOTE(review): the mangled name `A_` is reused for several distinct module
# globals here (transformers module handle, the two dicts below, and later the
# argparse objects) — each assignment clobbers the previous one, and the bodies
# below read the original names (`transformers_module`, `TASK_GUIDE_TO_MODELS`,
# `SPECIAL_TASK_GUIDE_TO_MODEL_TYPES`). Restore the names before running.
A_ = direct_transformers_import(TRANSFORMERS_PATH)
# Maps each task-guide doc page to the auto-model mapping listing its models.
A_ = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
A_ = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}
def _UpperCamelCase(task_guide):
    """Return a Markdown-formatted, comma-separated list of models for a task guide.

    Fix over the original: the parameter was named ``__UpperCamelCase`` while
    the body read the undefined name ``task_guide``; restored the name.

    Args:
        task_guide: File name of the task-guide doc page (e.g. ``"asr.md"``).

    Returns:
        A string of ``[Name](../model_doc/code)`` links joined by ``", "``,
        terminated with a newline.
    """
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    # Some guides list model types that have no auto-mapping entry.
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def _UpperCamelCase(task_guide, overwrite=False):
    """Check (and optionally fix) the auto-generated model list in a task guide.

    Fix over the original: both parameters were named ``__UpperCamelCase``
    (duplicate parameter names — a SyntaxError) while the body read undefined
    names; restored ``task_guide`` / ``overwrite``. The ``os.path.join`` call
    also joined the same mangled name with itself; it now joins the task-guides
    docs directory with the guide file name.

    Raises:
        ValueError: If the list is stale and ``overwrite`` is False.
    """
    # "docs/source/en/tasks" is the task-guides path constant defined at the
    # top of this file (its name was mangled away, so the literal is used).
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join("docs/source/en/tasks", task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join("docs/source/en/tasks", task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    # NOTE(review): the parser and the parsed args are both assigned to the
    # mangled name `A_`, yet the following lines read `parser` and `args`;
    # likewise `check_model_list_for_task` / `TASK_GUIDE_TO_MODELS` are the
    # original (unmangled) names of the function and dict defined above.
    A_ = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    A_ = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 384 | 1 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
    """Processor combining a BLIP image processor with an auto tokenizer.

    Fixes over the original: every parameter was named ``_lowerCAmelCase``
    (duplicate parameter names — a SyntaxError) while bodies read real names
    (``images``, ``text``, ...), and the final line carried trailing ``| 31 |``
    garbage. Parameter names restored; garbage removed. Method names and class
    attributes are kept as-is so callers are unaffected.
    """

    lowercase_ = ["image_processor", "tokenizer"]
    lowercase_ = "BlipImageProcessor"
    lowercase_ = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        # This processor never produces token_type_ids.
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length=None,
        stride: int = 0,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Union[str, TensorType, None] = None,
        **kwargs,
    ):
        """Tokenize ``text`` and/or preprocess ``images``.

        Returns the tokenizer output when only text is given, otherwise the
        image-processor output (merged with the text encoding when both are
        provided).

        Raises:
            ValueError: If neither ``images`` nor ``text`` is given.
        """
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Text-only path: delegate entirely to the tokenizer.
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def lowerCAmelCase_(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def lowerCAmelCase_(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def lowerCAmelCase_(self):
        """Union of tokenizer and image-processor input names, de-duplicated."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure for the PEGASUS-X model (standard transformers pattern).
#
# Fixes over the original: the last line carried trailing "| 360 | 0 |" garbage
# (a SyntaxError); the dict was bound to a mangled name while `_LazyModule`
# referenced `_import_structure`; the torch-only model list was never attached
# to the import structure; and the lazy module was never installed into
# `sys.modules`.
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: expose only the configuration objects.
    pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import os
def lowercase(grid):
    """Return the greatest product of four adjacent numbers in an n x n grid.

    Adjacency is vertical, horizontal, or either diagonal (Project Euler 11).
    Only works for square (n x n) grids, since row and column indices are used
    interchangeably.

    Fix over the original: the parameter was mangled to a different name while
    the body read the undefined name ``grid``; the name is restored.
    """
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    # Diagonal products persist across iterations when not recomputed, exactly
    # as in the original control flow (only updated when in range).
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal (/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest
def lowercase():
    """Read the number grid from ``grid.txt`` next to this file and solve it.

    Fix over the original: ``os.path.dirname`` was called on an undefined
    mangled name; restored to ``__file__``. The mangled comprehension
    variables are also restored.
    """
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]

    # NOTE(review): `largest_product` is the original (unmangled) name of the
    # grid-product solver defined above, which this file renamed to `lowercase`
    # — restore its name so this call resolves.
    return largest_product(grid)
if __name__ == "__main__":
    # NOTE(review): `solution` is the original (unmangled) name of the
    # grid-reading function above, which this file renamed to `lowercase`.
    print(solution())
| 595 | """simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class __lowerCAmelCase ( nn.Module):
    """Dual 2D-transformer module that blends the outputs of two transformers.

    Each of the two inner transformers encodes a different slice of the
    condition tokens; their per-transformer residuals are mixed by
    ``mix_ratio`` and added back to the input.

    NOTE(review): parameters are machine-mangled (all named `UpperCamelCase__`,
    assignment targets `A__`) while bodies read real attribute names
    (`self.transformers`, `self.mix_ratio`, ...). Restore before running.
    """

    def __init__( self : Union[str, Any] , UpperCamelCase__ : int = 16 , UpperCamelCase__ : int = 88 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 1 , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : int = 32 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str = "geglu" , UpperCamelCase__ : Optional[int] = None , ):
        """Build the two identically-configured inner 2D transformers."""
        super().__init__()
        A__ : Dict =nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , in_channels=UpperCamelCase__ , num_layers=UpperCamelCase__ , dropout=UpperCamelCase__ , norm_num_groups=UpperCamelCase__ , cross_attention_dim=UpperCamelCase__ , attention_bias=UpperCamelCase__ , sample_size=UpperCamelCase__ , num_vector_embeds=UpperCamelCase__ , activation_fn=UpperCamelCase__ , num_embeds_ada_norm=UpperCamelCase__ , )
                for _ in range(2 )
            ] )
        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        A__ : Dict =0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        A__ : List[str] =[77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        A__ : Union[str, Any] =[1, 0]

    def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : bool = True , ):
        """Run both transformers on their condition slices and mix the residuals.

        Returns a tuple ``(output_states,)`` when ``return_dict`` is False,
        otherwise a TransformeraDModelOutput.
        """
        A__ : Optional[Any] =hidden_states
        A__ : Union[str, Any] =[]
        A__ : str =0
        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            A__ : Dict =encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            A__ : str =self.transformer_index_for_condition[i]
            A__ : Optional[int] =self.transformers[transformer_index](
                UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , timestep=UpperCamelCase__ , cross_attention_kwargs=UpperCamelCase__ , return_dict=UpperCamelCase__ , )[0]
            # Keep only each transformer's residual contribution.
            encoded_states.append(encoded_state - input_states )
            tokens_start += self.condition_lengths[i]

        # Mix the two residuals, then restore the input (residual connection).
        A__ : Union[str, Any] =encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        A__ : Dict =output_states + input_states

        if not return_dict:
            return (output_states,)

        return TransformeraDModelOutput(sample=UpperCamelCase__ )
| 595 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
# PIL is an optional dependency: only import it when the vision extra is installed.
if is_vision_available():
    import PIL
# Module-level logger (scrambled name standing in for the conventional `logger`).
SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
def lowerCAmelCase_(_SCREAMING_SNAKE_CASE):
    """Coerce a single image, a list of frames, or a batch of videos into list-of-lists of frames.

    Returns `list[list[image]]`. Raises ValueError when the input cannot be interpreted
    as video frames. (The scrambled original referenced the undefined name `videos`;
    the parameter is bound to it explicitly here.)
    """
    videos = _SCREAMING_SNAKE_CASE
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        # Already batched: list of videos, each a list of frames.
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        # A single video (list of frames): wrap it in a batch.
        return [videos]
    elif is_valid_image(videos):
        # A single frame: wrap twice.
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
class UpperCamelCase(BaseImageProcessor):
    """Video image processor: resizes, center-crops, rescales and normalizes each frame.

    NOTE(review): the scrambled original inherited from the undefined name `__a` and gave
    every method the same name (`A_`) with duplicate parameter names (a SyntaxError). This
    restores the `BaseImageProcessor` subclass layout implied by the visible call sites
    (`self.resize`, `self._preprocess_image`, ...).
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Defaults: shortest-edge resize to 224 and a 224x224 center crop.
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        """Resize `image` to `size` (shortest-edge spec or explicit height/width)."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        """Center-crop `image` to `size['height']` x `size['width']`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        """Normalize `image` with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        data_format=ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Apply the configured transformations to a single frame."""
        # Original had `do_resize and size is None or resample is None` — wrong precedence.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess one or more videos; returns a `BatchFeature` with `pixel_values`."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        # `make_batched` does not exist in this module; the batching helper is `lowerCAmelCase_`.
        videos = lowerCAmelCase_(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 635 | from typing import TYPE_CHECKING
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# Base import structure; optional backends are registered below when available.
# (The scrambled original never defined `_import_structure`, never registered the
# optional components, and never installed the lazy module in sys.modules.)
_import_structure = {
    "configuration_bridgetower": [
        "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BridgeTowerConfig",
        "BridgeTowerTextConfig",
        "BridgeTowerVisionConfig",
    ],
    "processing_bridgetower": ["BridgeTowerProcessor"],
}
# Keep the scrambled original's module-level alias alive for any external reader.
SCREAMING_SNAKE_CASE = _import_structure

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bridgetower"] = [
        "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BridgeTowerForContrastiveLearning",
        "BridgeTowerForImageAndTextRetrieval",
        "BridgeTowerForMaskedLM",
        "BridgeTowerModel",
        "BridgeTowerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bridgetower import (
        BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BridgeTowerConfig,
        BridgeTowerTextConfig,
        BridgeTowerVisionConfig,
    )
    from .processing_bridgetower import BridgeTowerProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_bridgetower import BridgeTowerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bridgetower import (
            BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
            BridgeTowerForContrastiveLearning,
            BridgeTowerForImageAndTextRetrieval,
            BridgeTowerForMaskedLM,
            BridgeTowerModel,
            BridgeTowerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 635 | 1 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
# Checkpoint size tag -> number of hidden layers. The scrambled original bound both
# dicts to the same throwaway name, leaving the names the converter reads undefined.
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}
# Checkpoint size tag -> hidden size. (Name keeps the converter's original spelling.)
HIDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1_024,
    "1B5": 2_048,
    "3B": 2_560,
    "7B": 4_096,
    "14B": 5_120,
}
def convert_state_dict(state_dict):
    """Rename RWKV checkpoint keys in place to the HF `RwkvModel` naming scheme."""
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        # Everything except the LM head lives under the `rwkv.` prefix.
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict


def convert_rmkv_checkpoint_to_hf_format(
    repo_id,
    checkpoint_file,
    output_dir,
    size=None,
    tokenizer_file=None,
    push_to_hub=False,
    model_name=None,
):
    """Download an RWKV checkpoint, convert it to HF format and save it to `output_dir`.

    Raises ValueError when the model size cannot be inferred or `push_to_hub` is set
    without `model_name`.
    """
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")
    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)
    # 3. Download model file then convert state_dict
    checkpoint = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(checkpoint, map_location="cpu")
    state_dict = convert_state_dict(state_dict)
    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)
    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        "Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
        del state_dict
        gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)


# Backward-compatible alias: the scrambled original left `__lowercase` bound to this entry point.
__lowercase = convert_rmkv_checkpoint_to_hf_format
if __name__ == "__main__":
    # CLI wrapper around the converter. (The scrambled original bound the parser and the
    # parsed args to a throwaway name, leaving `parser` and `args` undefined.)
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )
    args = parser.parse_args()
    convert_rmkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
| 345 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCamelCase(DiffusionPipeline):
    """Unconditional latent-diffusion pipeline: sample latents, denoise with the UNet,
    decode with the VQ-VAE.

    NOTE(review): the scrambled original inherited from the undefined name `lowerCamelCase`
    and used duplicate parameter names (a SyntaxError); parameter names are restored from
    the visible keyword usage (`register_modules(vqvae=..., unet=..., scheduler=...)`).
    """

    def __init__(self, vqvae, unet, scheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        output_type="pil",
        return_dict=True,
        **kwargs,
    ):
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 345 | 1 |
'''simple docstring'''
import numpy as np
class Cell:
    """A* search node on the grid.

    Renamed from the scrambled `__a`: the rest of the file instantiates `Cell()`.
    Cells compare equal when their positions match.
    """

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        # Path cost, heuristic, and total score used by A*.
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)
class Gridworld:
    """Rectangular grid world for the A* demo.

    Renamed from the scrambled `__a` (which also shadowed the Cell class above):
    the `__main__` block instantiates `Gridworld()`.
    """

    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Return the in-bounds 8-connected neighbour cells of `cell`."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x, current_y = cell.position
        neighbours = []
        for dx, dy in neughbour_cord:
            x = current_x + dx
            y = current_y + dy
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                neighbour = Cell()
                neighbour.position = (x, y)
                neighbour.parent = cell
                neighbours.append(neighbour)
        return neighbours
def astar(world, start, goal):
    """A*-style search from `start` to `goal`; returns the path as a list of positions.

    Renamed from the scrambled `__magic_name__`: the `__main__` block calls `astar(...)`.
    The heuristic is the squared Euclidean distance to the goal.
    """
    _open = []
    _closed = []
    _open.append(start)
    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            # NOTE(review): these membership loops are no-ops (`continue` only skips the
            # inner iteration), preserved from the visible original.
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    # Walk the parent chain back to the start and reverse it.
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
if __name__ == "__main__":
    # Demo: find a path across a 5x5 grid and print the grid with the path marked.
    # (The scrambled original overwrote the start/goal cells with their position tuples
    # and never wrote the path into the grid.)
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons: mark the found path on the grid.
    for i in s:
        world.w[i] = 1
    print(world.w)
| 109 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
    """patch_submodule must swap every alias of os.path.join and restore all of them on exit."""
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched
        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock
        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock
        # check join
        assert _test_patching.join is mock
        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname
        # Even renamed modules or objects must be patched
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock
        assert _test_patching.renamed_join is mock
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everything is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    """A builtin that appears in the target module's globals must be patchable too."""
    assert _test_patching.open is open
    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def test_patch_submodule_missing():
    """Patching an attribute of a module the target never imported must be a no-op, not an error."""
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass
def test_patch_submodule_missing_builtin():
    """A builtin absent from the target's globals is still patched (it may be looked up later)."""
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    """The patch object must also work via explicit start()/stop() instead of a with-block."""
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def test_patch_submodule_successive():
    """Nested patches of attributes under the same module must compose in any order."""
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    """Patching attributes on modules that do not exist at all must not raise."""
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
| 109 | 1 |
'''simple docstring'''
import torch
from torch import nn
class _a ( nn.Module ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=1 , lowercase_=False ) -> Dict:
super().__init__()
lowerCAmelCase : Optional[int] = n_token
lowerCAmelCase : Optional[int] = d_embed
lowerCAmelCase : str = d_proj
lowerCAmelCase : str = cutoffs + [n_token]
lowerCAmelCase : Any = [0] + self.cutoffs
lowerCAmelCase : Optional[int] = div_val
lowerCAmelCase : Any = self.cutoffs[0]
lowerCAmelCase : Optional[int] = len(self.cutoffs ) - 1
lowerCAmelCase : Optional[Any] = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
lowerCAmelCase : Tuple = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
lowerCAmelCase : Tuple = nn.Parameter(torch.zeros(self.n_clusters ) )
lowerCAmelCase : str = nn.ModuleList()
lowerCAmelCase : Optional[Any] = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowercase_ , lowercase_ ) ) )
else:
self.out_projs.append(lowercase_ )
self.out_layers.append(nn.Linear(lowercase_ , lowercase_ ) )
else:
for i in range(len(self.cutoffs ) ):
lowerCAmelCase : Tuple = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase : Dict = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowercase_ , lowercase_ ) ) )
self.out_layers.append(nn.Linear(lowercase_ , r_idx - l_idx ) )
lowerCAmelCase : Any = keep_order
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]:
if proj is None:
lowerCAmelCase : List[str] = nn.functional.linear(lowercase_ , lowercase_ , bias=lowercase_ )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
lowerCAmelCase : Optional[int] = nn.functional.linear(lowercase_ , proj.t().contiguous() )
lowerCAmelCase : Tuple = nn.functional.linear(lowercase_ , lowercase_ , bias=lowercase_ )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def _snake_case ( self , lowercase_ , lowercase_=None , lowercase_=False ) -> Any:
if labels is not None:
# Shift so that tokens < n predict n
lowerCAmelCase : List[Any] = hidden[..., :-1, :].contiguous()
lowerCAmelCase : int = labels[..., 1:].contiguous()
lowerCAmelCase : str = hidden.view(-1 , hidden.size(-1 ) )
lowerCAmelCase : List[Any] = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("""Input and labels should have the same size in the batch dimension.""" )
else:
lowerCAmelCase : str = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
lowerCAmelCase : List[str] = self._compute_logit(lowercase_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
lowerCAmelCase : Dict = labels != -100
lowerCAmelCase : Tuple = torch.zeros_like(lowercase_ , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase : Dict = (
-nn.functional.log_softmax(lowercase_ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowerCAmelCase : Dict = nn.functional.log_softmax(lowercase_ , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase : int = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase : int = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase : Optional[int] = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase : Tuple = self.out_layers[i].weight
lowerCAmelCase : int = self.out_layers[i].bias
if i == 0:
lowerCAmelCase : Optional[int] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase : List[str] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(lowercase_ )
biases.append(lowercase_ )
lowerCAmelCase : Any = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase : Optional[Any] = self._compute_logit(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowerCAmelCase : Tuple = nn.functional.log_softmax(lowercase_ , dim=1 )
if labels is None:
lowerCAmelCase : int = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowerCAmelCase : List[str] = torch.zeros_like(lowercase_ , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase : int = 0
lowerCAmelCase : List[Any] = [0] + self.cutoffs
for i in range(len(lowercase_ ) - 1 ):
lowerCAmelCase : Union[str, Any] = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowerCAmelCase : List[str] = (labels >= l_idx) & (labels < r_idx)
lowerCAmelCase : List[str] = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowerCAmelCase : Dict = labels.index_select(0 , lowercase_ ) - l_idx
lowerCAmelCase : Union[str, Any] = head_logprob.index_select(0 , lowercase_ )
lowerCAmelCase : str = hidden.index_select(0 , lowercase_ )
else:
lowerCAmelCase : Optional[int] = hidden
if i == 0:
if labels is not None:
lowerCAmelCase : List[str] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase : Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase : List[str] = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase : Tuple = self._compute_logit(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowerCAmelCase : Any = nn.functional.log_softmax(lowercase_ , dim=1 )
lowerCAmelCase : Tuple = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowerCAmelCase : str = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase : Dict = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowerCAmelCase : Union[str, Any] = logprob_i
if labels is not None:
if (hasattr(self , """keep_order""" ) and self.keep_order) or keep_order:
out.index_copy_(0 , lowercase_ , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def _snake_case ( self , lowercase_ ) -> List[str]:
if self.n_clusters == 0:
lowerCAmelCase : Union[str, Any] = self._compute_logit(lowercase_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(lowercase_ , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase : Dict = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase : List[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase : Union[str, Any] = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase : Optional[Any] = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase : str = self.out_layers[i].weight
lowerCAmelCase : List[Any] = self.out_layers[i].bias
if i == 0:
lowerCAmelCase : Dict = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase : List[str] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(lowercase_ )
biases.append(lowercase_ )
lowerCAmelCase : Optional[int] = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase : List[str] = self._compute_logit(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowerCAmelCase : List[str] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
lowerCAmelCase : List[Any] = nn.functional.log_softmax(lowercase_ , dim=1 )
lowerCAmelCase : Optional[int] = [0] + self.cutoffs
for i in range(len(lowercase_ ) - 1 ):
lowerCAmelCase : Optional[int] = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
lowerCAmelCase : Dict = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase : Dict = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase : Any = self._compute_logit(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowerCAmelCase : List[Any] = nn.functional.log_softmax(lowercase_ , dim=1 )
lowerCAmelCase : Union[str, Any] = head_logprob[:, -i] + tail_logprob_i
lowerCAmelCase : int = logprob_i
return out
| 709 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger: the class below calls `logger.info(...)`, so this binding is
# required (the mangled original bound it to a throwaway name and then
# overwrote that name with the archive map).
logger = logging.get_logger(__name__)

# Canonical HF name for the pretrained-config URL map.
DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    """Configuration class for the DETR object-detection model.

    Stores encoder/decoder sizes, the backbone choice (timm name or a nested
    HF backbone config), Hungarian-matcher costs, and loss coefficients.

    NOTE(review): restored from the mangled original, in which every
    ``__init__`` parameter was literally named ``lowercase_`` (a duplicate-
    argument SyntaxError), every ``self.x = ...`` assignment was collapsed
    into a discarded local, and the class attributes/methods all shared one
    name so only the last binding survived. The base class ``snake_case_``
    was undefined; ``PretrainedConfig`` is imported above and is the
    canonical base.
    """

    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        """Alias required by the `attribute_map` contract."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Alias required by the `attribute_map` contract."""
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config, **kwargs):
        """Alternate constructor: build a DETR config around a backbone config."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    """ONNX export configuration for DETR.

    NOTE(review): in the mangled original all three members were named
    ``_snake_case`` (only the last survived) and the class shadowed the
    config class above by reusing its name; the canonical member names
    from the OnnxConfig API are restored here.
    """

    # Minimum torch version known to export this graph correctly.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Named model inputs with their dynamic-axis labels."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating exported outputs."""
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        """Default ONNX opset version for export."""
        return 12
| 693 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase = {
"configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
"PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
"PegasusXForConditionalGeneration",
"PegasusXModel",
"PegasusXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 179 |
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
# (the worker declares `global process_lock`, so this exact name is required;
# the mangled original bound the Lock to an unrelated name).
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Worker for one array slot in parallel odd-even transposition sort.

    Each process holds one value and alternately exchanges it with its left
    and right neighbours through pipes, keeping the smaller value on the left.
    The final value is reported through ``result_pipe``.
    """
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    # NOTE(review): the phase count is hard-coded to 10, so lists longer than
    # 10 elements are not guaranteed to come out fully sorted.
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    """Sort ``arr`` with one process per element; returns the sorted list."""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    """Demo: sort a descending list of 10 integers and print before/after."""
    arr = list(range(10, 0, -1))
    print('Initial List')
    print(*arr)
    arr = odd_even_transposition(arr)
    print('Sorted List\n')
    print(*arr)


if __name__ == "__main__":
    main()
| 478 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class a_ ( unittest.TestCase ):
    """Pipeline tests for the `text-generation` task.

    NOTE(review): this class appears machine-mangled — both class attributes
    are named `lowercase` (the second overwrites the first), every method is
    named `A__` (so only the last binding survives and unittest discovers no
    `test_*` methods), several signatures repeat a parameter name (a
    SyntaxError), and locals were collapsed into `UpperCamelCase` while the
    bodies still reference `text_generator`/`outputs`/`pipe`. Kept verbatim;
    confirm against the upstream transformers test file before running.
    """
    lowercase = MODEL_FOR_CAUSAL_LM_MAPPING
    lowercase = TF_MODEL_FOR_CAUSAL_LM_MAPPING
    @require_torch
    def A__ ( self ) -> Tuple:
        """Smoke-test the tiny-ctrl PT pipeline: deterministic, batched, and tensor-returning calls."""
        UpperCamelCase = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""pt""" )
        # Using `do_sample=False` to force deterministic output
        UpperCamelCase = text_generator("""This is a test""" , do_sample=_SCREAMING_SNAKE_CASE )
        self.assertEqual(
            _SCREAMING_SNAKE_CASE , [
                {
                    """generated_text""": (
                        """This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
                        """ oscope. FiliFili@@"""
                    )
                }
            ] , )
        UpperCamelCase = text_generator(["""This is a test""", """This is a second test"""] )
        self.assertEqual(
            _SCREAMING_SNAKE_CASE , [
                [
                    {
                        """generated_text""": (
                            """This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
                            """ oscope. FiliFili@@"""
                        )
                    }
                ],
                [
                    {
                        """generated_text""": (
                            """This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"""
                            """ oscope. oscope. FiliFili@@"""
                        )
                    }
                ],
            ] , )
        UpperCamelCase = text_generator("""This is a test""" , do_sample=_SCREAMING_SNAKE_CASE , num_return_sequences=2 , return_tensors=_SCREAMING_SNAKE_CASE )
        self.assertEqual(
            _SCREAMING_SNAKE_CASE , [
                {"""generated_token_ids""": ANY(_SCREAMING_SNAKE_CASE )},
                {"""generated_token_ids""": ANY(_SCREAMING_SNAKE_CASE )},
            ] , )
        # presumably sets tokenizer.pad_token_id / pad_token — TODO confirm upstream
        UpperCamelCase = text_generator.model.config.eos_token_id
        UpperCamelCase = """<pad>"""
        UpperCamelCase = text_generator(
            ["""This is a test""", """This is a second test"""] , do_sample=_SCREAMING_SNAKE_CASE , num_return_sequences=2 , batch_size=2 , return_tensors=_SCREAMING_SNAKE_CASE , )
        self.assertEqual(
            _SCREAMING_SNAKE_CASE , [
                [
                    {"""generated_token_ids""": ANY(_SCREAMING_SNAKE_CASE )},
                    {"""generated_token_ids""": ANY(_SCREAMING_SNAKE_CASE )},
                ],
                [
                    {"""generated_token_ids""": ANY(_SCREAMING_SNAKE_CASE )},
                    {"""generated_token_ids""": ANY(_SCREAMING_SNAKE_CASE )},
                ],
            ] , )
    @require_tf
    def A__ ( self ) -> Tuple:
        """Smoke-test the tiny-ctrl TF pipeline with deterministic single and batched prompts."""
        UpperCamelCase = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""tf""" )
        # Using `do_sample=False` to force deterministic output
        UpperCamelCase = text_generator("""This is a test""" , do_sample=_SCREAMING_SNAKE_CASE )
        self.assertEqual(
            _SCREAMING_SNAKE_CASE , [
                {
                    """generated_text""": (
                        """This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
                        """ please,"""
                    )
                }
            ] , )
        UpperCamelCase = text_generator(["""This is a test""", """This is a second test"""] , do_sample=_SCREAMING_SNAKE_CASE )
        self.assertEqual(
            _SCREAMING_SNAKE_CASE , [
                [
                    {
                        """generated_text""": (
                            """This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
                            """ please,"""
                        )
                    }
                ],
                [
                    {
                        """generated_text""": (
                            """This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"""
                            """ Cannes 閲閲Cannes Cannes Cannes 攵 please,"""
                        )
                    }
                ],
            ] , )
    def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
        """Factory used by the pipeline-test mixin: build a pipeline and sample inputs.

        NOTE(review): duplicate parameter names here are a SyntaxError — the
        original presumably took (model, tokenizer, processor).
        """
        UpperCamelCase = TextGenerationPipeline(model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
        return text_generator, ["This is a test", "Another test"]
    def A__ ( self ) -> Union[str, Any]:
        """Verify `stop_sequence` truncates generation at the given token."""
        UpperCamelCase = """Hello I believe in"""
        UpperCamelCase = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
        UpperCamelCase = text_generator(_SCREAMING_SNAKE_CASE )
        self.assertEqual(
            _SCREAMING_SNAKE_CASE , [{"""generated_text""": """Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"""}] , )
        UpperCamelCase = text_generator(_SCREAMING_SNAKE_CASE , stop_sequence=""" fe""" )
        self.assertEqual(_SCREAMING_SNAKE_CASE , [{"""generated_text""": """Hello I believe in fe"""}] )
    def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
        """Generic per-model checks: return_full_text/return_tensors flags, batching, empty prompts, long-input handling.

        NOTE(review): duplicate parameter names are a SyntaxError — the
        original presumably took (text_generator, model).
        """
        UpperCamelCase = text_generator.model
        UpperCamelCase = text_generator.tokenizer
        UpperCamelCase = text_generator("""This is a test""" )
        self.assertEqual(_SCREAMING_SNAKE_CASE , [{"""generated_text""": ANY(_SCREAMING_SNAKE_CASE )}] )
        self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
        UpperCamelCase = text_generator("""This is a test""" , return_full_text=_SCREAMING_SNAKE_CASE )
        self.assertEqual(_SCREAMING_SNAKE_CASE , [{"""generated_text""": ANY(_SCREAMING_SNAKE_CASE )}] )
        self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
        UpperCamelCase = pipeline(task="""text-generation""" , model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , return_full_text=_SCREAMING_SNAKE_CASE )
        UpperCamelCase = text_generator("""This is a test""" )
        self.assertEqual(_SCREAMING_SNAKE_CASE , [{"""generated_text""": ANY(_SCREAMING_SNAKE_CASE )}] )
        self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
        UpperCamelCase = text_generator("""This is a test""" , return_full_text=_SCREAMING_SNAKE_CASE )
        self.assertEqual(_SCREAMING_SNAKE_CASE , [{"""generated_text""": ANY(_SCREAMING_SNAKE_CASE )}] )
        self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
        UpperCamelCase = text_generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_SCREAMING_SNAKE_CASE )
        self.assertEqual(
            _SCREAMING_SNAKE_CASE , [
                [{"""generated_text""": ANY(_SCREAMING_SNAKE_CASE )}, {"""generated_text""": ANY(_SCREAMING_SNAKE_CASE )}],
                [{"""generated_text""": ANY(_SCREAMING_SNAKE_CASE )}, {"""generated_text""": ANY(_SCREAMING_SNAKE_CASE )}],
            ] , )
        if text_generator.tokenizer.pad_token is not None:
            UpperCamelCase = text_generator(
                ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_SCREAMING_SNAKE_CASE )
            self.assertEqual(
                _SCREAMING_SNAKE_CASE , [
                    [{"""generated_text""": ANY(_SCREAMING_SNAKE_CASE )}, {"""generated_text""": ANY(_SCREAMING_SNAKE_CASE )}],
                    [{"""generated_text""": ANY(_SCREAMING_SNAKE_CASE )}, {"""generated_text""": ANY(_SCREAMING_SNAKE_CASE )}],
                ] , )
        with self.assertRaises(_SCREAMING_SNAKE_CASE ):
            UpperCamelCase = text_generator("""test""" , return_full_text=_SCREAMING_SNAKE_CASE , return_text=_SCREAMING_SNAKE_CASE )
        with self.assertRaises(_SCREAMING_SNAKE_CASE ):
            UpperCamelCase = text_generator("""test""" , return_full_text=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE )
        with self.assertRaises(_SCREAMING_SNAKE_CASE ):
            UpperCamelCase = text_generator("""test""" , return_text=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE )
        # Empty prompt is slighly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            UpperCamelCase = text_generator("""""" )
            self.assertEqual(_SCREAMING_SNAKE_CASE , [{"""generated_text""": ANY(_SCREAMING_SNAKE_CASE )}] )
        else:
            with self.assertRaises((ValueError, AssertionError) ):
                UpperCamelCase = text_generator("""""" )
        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return
        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        UpperCamelCase = ["""RwkvForCausalLM""", """XGLMForCausalLM""", """GPTNeoXForCausalLM"""]
        if (
            tokenizer.model_max_length < 10000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
                text_generator("""This is a test""" * 500 , max_new_tokens=20 )
            UpperCamelCase = text_generator("""This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=20 )
            # Hole strategy cannot work
            with self.assertRaises(_SCREAMING_SNAKE_CASE ):
                text_generator(
                    """This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=tokenizer.model_max_length + 10 , )
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def A__ ( self ) -> Union[str, Any]:
        """Check device_map/torch_dtype plumbing (bf16 and default fp32) on tiny-random-bloom."""
        import torch
        # Classic `model_kwargs`
        UpperCamelCase = pipeline(
            model="""hf-internal-testing/tiny-random-bloom""" , model_kwargs={"""device_map""": """auto""", """torch_dtype""": torch.bfloataa} , )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
        UpperCamelCase = pipe("""This is a test""" )
        self.assertEqual(
            _SCREAMING_SNAKE_CASE , [
                {
                    """generated_text""": (
                        """This is a test test test test test test test test test test test test test test test test"""
                        """ test"""
                    )
                }
            ] , )
        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        UpperCamelCase = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.bfloataa )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
        UpperCamelCase = pipe("""This is a test""" )
        self.assertEqual(
            _SCREAMING_SNAKE_CASE , [
                {
                    """generated_text""": (
                        """This is a test test test test test test test test test test test test test test test test"""
                        """ test"""
                    )
                }
            ] , )
        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        UpperCamelCase = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
        UpperCamelCase = pipe("""This is a test""" )
        self.assertEqual(
            _SCREAMING_SNAKE_CASE , [
                {
                    """generated_text""": (
                        """This is a test test test test test test test test test test test test test test test test"""
                        """ test"""
                    )
                }
            ] , )
    @require_torch
    @require_torch_gpu
    def A__ ( self ) -> List[Any]:
        """Smoke-test fp16 generation on GPU."""
        import torch
        UpperCamelCase = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device=0 , torch_dtype=torch.floataa )
        pipe("""This is a test""" )
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def A__ ( self ) -> List[str]:
        """Smoke-test sampled generation (top_p) with accelerate device_map."""
        import torch
        UpperCamelCase = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.floataa )
        pipe("""This is a test""" , do_sample=_SCREAMING_SNAKE_CASE , top_p=0.5 )
    def A__ ( self ) -> Union[str, Any]:
        """Assert the max_length/max_new_tokens conflict warning is emitted only when both are set."""
        UpperCamelCase = """Hello world"""
        UpperCamelCase = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
        if text_generator.model.framework == "tf":
            UpperCamelCase = logging.get_logger("""transformers.generation.tf_utils""" )
        else:
            UpperCamelCase = logging.get_logger("""transformers.generation.utils""" )
        UpperCamelCase = """Both `max_new_tokens`""" # The beggining of the message to be checked in this test
        # Both are set by the user -> log warning
        with CaptureLogger(_SCREAMING_SNAKE_CASE ) as cl:
            UpperCamelCase = text_generator(_SCREAMING_SNAKE_CASE , max_length=10 , max_new_tokens=1 )
        self.assertIn(_SCREAMING_SNAKE_CASE , cl.out )
        # The user only sets one -> no warning
        with CaptureLogger(_SCREAMING_SNAKE_CASE ) as cl:
            UpperCamelCase = text_generator(_SCREAMING_SNAKE_CASE , max_new_tokens=1 )
        self.assertNotIn(_SCREAMING_SNAKE_CASE , cl.out )
        with CaptureLogger(_SCREAMING_SNAKE_CASE ) as cl:
            UpperCamelCase = text_generator(_SCREAMING_SNAKE_CASE , max_length=10 )
        self.assertNotIn(_SCREAMING_SNAKE_CASE , cl.out )
| 35 |
'''simple docstring'''
from __future__ import annotations
def lowercase__(voltage, current, resistance) -> dict[str, float]:
    """Apply Ohm's law to solve for the one quantity given as 0.

    Exactly one of (voltage, current, resistance) must be 0; the other two
    are used to compute it. Returns a one-entry dict naming the solved
    quantity.

    NOTE(review): the original declared all three parameters with the same
    mangled name (a duplicate-argument SyntaxError); the names restored here
    are the ones the body already referenced.

    Raises:
        ValueError: if not exactly one argument is 0, or resistance < 0.
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("""One and only one argument must be 0""")
    if resistance < 0:
        raise ValueError("""Resistance cannot be negative""")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        # Unreachable: the count(0) check above guarantees one branch matched.
        raise ValueError("""Exactly one argument must be 0""")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 35 | 1 |
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class _UpperCamelCase( unittest.TestCase ):
    """Unit tests for the `lib` linear-algebra primitives (Vector, Matrix, helpers).

    NOTE(review): in the mangled original every test method was named
    `__lowerCAmelCase`, so only the last definition survived and unittest
    discovered no `test_*` methods at all; each test now has a unique,
    discoverable name. Local references were restored from the surviving
    identifiers in each body (e.g. `x.component(0)`).
    """

    def test_component(self):
        """Vector.component returns the entry at the given index."""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()  # constructing an empty vector must not raise

    def test_str(self):
        """str(Vector) renders '(c1,c2,...)'."""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), '(0,0,0,0,0,1)')

    def test_size(self):
        """len(Vector) is the number of components."""
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        """euclidean_length computes the 2-norm."""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        """Component-wise vector addition."""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        """Component-wise vector subtraction."""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        """Scalar multiplication and dot product."""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), '(3.0,6.0,9.0)')
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        """zero_vector(n) yields n zero components."""
        self.assertEqual(str(zero_vector(10)).count('0'), 10)

    def test_unit_basis_vector(self):
        """unit_basis_vector(n, i) has a single 1 at index i."""
        self.assertEqual(str(unit_basis_vector(3, 1)), '(0,1,0)')

    def test_axpy(self):
        """axpy(a, x, y) == a*x + y."""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), '(3,4,7)')

    def test_copy(self):
        """copy() yields an equal vector."""
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        """change_component mutates a single entry in place."""
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), '(0,1,0)')

    def test_str_matrix(self):
        """str(Matrix) renders rows as |a,b,c| lines."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n', str(a))

    def test_minor(self):
        """minor(x, y) matches the hand-computed minor matrix."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        """cofactor(x, y) matches the hand-computed cofactor matrix."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        """3x3 determinant."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self):
        """Matrix-vector product and matrix-scalar product."""
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual('(14,32,50)', str(a * x))
        self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n', str(a * 2))

    def test_change_component_matrix(self):
        """change_component mutates a single matrix entry in place."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n', str(a))

    def test_component_matrix(self):
        """component(x, y) reads a single matrix entry."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        # Fixed: the original `assertEqual(7, ..., 0.01)` silently passed the
        # intended tolerance as the `msg` argument.
        self.assertAlmostEqual(7, a.component(2, 1), delta=0.01)

    def test_add_matrix(self):
        """Component-wise matrix addition."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n', str(a + b))

    def test_sub_matrix(self):
        """Component-wise matrix subtraction."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n', str(a - b))

    def test_square_zero_matrix(self):
        """square_zero_matrix(n) is an n x n all-zero matrix."""
        self.assertEqual(
            '|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n',
            str(square_zero_matrix(5)),
        )


if __name__ == "__main__":
    unittest.main()
| 47 |
"""simple docstring"""
def _lowerCAmelCase ( lowerCamelCase__ : int = 1_0_0 ) -> int:
_SCREAMING_SNAKE_CASE : Any = set()
_SCREAMING_SNAKE_CASE : Any = 0
_SCREAMING_SNAKE_CASE : Tuple = n + 1 # maximum limit
for a in range(2, lowerCamelCase__ ):
for b in range(2, lowerCamelCase__ ):
_SCREAMING_SNAKE_CASE : int = a**b # calculates the current power
collect_powers.add(lowerCamelCase__ ) # adds the result to the set
return len(lowerCamelCase__ )
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
| 572 | 0 |
def jaro_winkler(stra: str, strb: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings (0.0 .. 1.0).

    Jaro similarity from matched characters and transpositions, boosted by
    up to 0.1 * 4 for a shared prefix of at most 4 characters.

    NOTE(review): restored from a mangled original whose `def` repeated the
    same parameter name twice (a SyntaxError) and whose __main__ guard
    already referenced the canonical name `jaro_winkler`.
    """

    def get_matched_characters(_stra: str, _strb: str) -> str:
        """Characters of _stra matched in _strb within the Jaro window."""
        matched = []
        limit = min(len(_stra), len(_strb)) // 2
        for i, l in enumerate(_stra):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_strb)))
            if l in _strb[left:right]:
                matched.append(l)
                # blank out the consumed character so it cannot match twice
                _strb = f"{_strb[0:_strb.index(l)]} {_strb[_strb.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_a = get_matched_characters(stra, strb)
    matching_b = get_matched_characters(strb, stra)
    match_count = len(matching_a)

    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b) if ca != cb]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra)
                + match_count / len(strb)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4], strb[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler('hello', 'world'))
| 714 |
def __lowerCAmelCase(__magic_name__):
    """Return True iff the number equals the sum of its proper positive divisors.

    Fixed: the original returned True for 0 and negatives, because the empty
    divisor sum (0) compared equal to the input.
    """
    if __magic_name__ < 1:
        return False  # 0 and negative numbers are not perfect
    return sum(i for i in range(1, __magic_name__ // 2 + 1) if __magic_name__ % i == 0) == __magic_name__


# Public alias: the __main__ guard below refers to `perfect`.
perfect = __lowerCAmelCase

if __name__ == "__main__":
    print('Program to check whether a number is a Perfect number or not...')
    number = int(input('Enter number: ').strip())
    print(f'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''')
| 206 | 0 |
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate sum(c * x**i) term by term (O(n) power computations)."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's scheme (one multiply per coefficient)."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


# NOTE(review): restored from a mangled original in which both functions
# shared one name (the second shadowed the first), their parameters were
# duplicated (a SyntaxError), and the demo below referenced the canonical
# names while running unguarded at import time.
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
| 175 |
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger (the mangled original bound it to a throwaway name and then
# overwrote that name with the archive map).
logger = logging.get_logger(__name__)

# Canonical HF name for the pretrained-config URL map.
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class EfficientNetConfig(PretrainedConfig):
    """Configuration class for EfficientNet image classifiers.

    Stores the compound-scaling coefficients, per-stage block hyperparameters,
    and classifier-head settings.

    NOTE(review): restored from a mangled original whose ``__init__`` declared
    every parameter with the same name (a duplicate-argument SyntaxError) and
    discarded every ``self.x = ...`` assignment into a local; the base class
    placeholder was undefined while ``PretrainedConfig`` is imported above.
    The mutable list defaults are kept as in the upstream config — they are
    never mutated here.
    """

    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes=[3, 3, 5, 3, 5, 5, 3],
        in_channels=[32, 16, 24, 40, 80, 112, 192],
        out_channels=[16, 24, 40, 80, 112, 192, 320],
        depthwise_padding=[],
        strides=[1, 2, 2, 2, 1, 2, 1],
        num_block_repeats=[1, 2, 2, 3, 3, 4, 1],
        expand_ratios=[1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        # each block repeat expands into 4 hidden layers
        self.num_hidden_layers = sum(num_block_repeats) * 4
class __A ( OnnxConfig ):
    """ONNX export configuration for EfficientNet.

    Fixes over the original: the base class was the undefined name
    ``SCREAMING_SNAKE_CASE_``; both properties shared the name
    ``lowerCamelCase__`` (the second silently shadowed the first) and the
    class attribute was bound to ``UpperCAmelCase__``.  Names are restored to
    the ``OnnxConfig`` contract (``inputs``, ``atol_for_validation``,
    ``torch_onnx_minimum_version``).
    """

    # Minimum torch version required for a correct export of this model.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis names for the single ``pixel_values`` input tensor."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1e-5
| 96 | 0 |
# flake8: noqa
# Lint as: python3
# Public API of this utils package, consumed by `from ... import *`.
# Fix: the list was bound to the throwaway name `a` instead of `__all__`,
# so it had no effect on star-imports.
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 704 | '''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class __lowercase ( unittest.TestCase ):
    """Unit tests for the ``Vector``/``Matrix`` helpers imported from ``.lib``.

    NOTE(review): every test method below is named ``lowerCAmelCase``, so each
    definition shadows the previous one and unittest only collects (and runs)
    the last.  Many bodies also bind results to ``__UpperCamelCase`` while the
    assertions read other names (``x``, ``y``, ``a`` ...), which would raise
    NameError if executed.  The original descriptive ``test_*`` method names
    and local variable names appear to have been mangled; restore unique
    names to make these tests effective.
    """
    # Component access on a small vector (and empty-vector construction).
    def lowerCAmelCase ( self ):
        __UpperCamelCase : Dict = Vector([1, 2, 3] )
        self.assertEqual(x.component(0 ) , 1 )
        self.assertEqual(x.component(2 ) , 3 )
        __UpperCamelCase : Dict = Vector()
    # str() formatting of a vector.
    def lowerCAmelCase ( self ):
        __UpperCamelCase : str = Vector([0, 0, 0, 0, 0, 1] )
        self.assertEqual(str(_lowerCamelCase ) , '(0,0,0,0,0,1)' )
    # len() of a vector.
    def lowerCAmelCase ( self ):
        __UpperCamelCase : Dict = Vector([1, 2, 3, 4] )
        self.assertEqual(len(_lowerCamelCase ) , 4 )
    # Euclidean length for several representative vectors.
    def lowerCAmelCase ( self ):
        __UpperCamelCase : Any = Vector([1, 2] )
        __UpperCamelCase : Optional[int] = Vector([1, 2, 3, 4, 5] )
        __UpperCamelCase : Optional[Any] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
        __UpperCamelCase : List[str] = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
        self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 )
        self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 )
        self.assertEqual(z.euclidean_length() , 0 )
        self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 )
    # Component-wise vector addition.
    def lowerCAmelCase ( self ):
        __UpperCamelCase : int = Vector([1, 2, 3] )
        __UpperCamelCase : List[str] = Vector([1, 1, 1] )
        self.assertEqual((x + y).component(0 ) , 2 )
        self.assertEqual((x + y).component(1 ) , 3 )
        self.assertEqual((x + y).component(2 ) , 4 )
    # Component-wise vector subtraction.
    def lowerCAmelCase ( self ):
        __UpperCamelCase : Optional[Any] = Vector([1, 2, 3] )
        __UpperCamelCase : Dict = Vector([1, 1, 1] )
        self.assertEqual((x - y).component(0 ) , 0 )
        self.assertEqual((x - y).component(1 ) , 1 )
        self.assertEqual((x - y).component(2 ) , 2 )
    # Scalar multiplication and the dot product of orthogonal vectors.
    def lowerCAmelCase ( self ):
        __UpperCamelCase : Dict = Vector([1, 2, 3] )
        __UpperCamelCase : int = Vector([2, -1, 4] ) # for test of dot product
        __UpperCamelCase : int = Vector([1, -2, -1] )
        self.assertEqual(str(x * 3.0 ) , '(3.0,6.0,9.0)' )
        self.assertEqual((a * b) , 0 )
    # zero_vector factory produces the requested number of zeros.
    def lowerCAmelCase ( self ):
        self.assertEqual(str(zero_vector(1_0 ) ).count('0' ) , 1_0 )
    # unit_basis_vector factory.
    def lowerCAmelCase ( self ):
        self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '(0,1,0)' )
    # axpy: scalar*x + y.
    def lowerCAmelCase ( self ):
        __UpperCamelCase : Optional[int] = Vector([1, 2, 3] )
        __UpperCamelCase : Union[str, Any] = Vector([1, 0, 1] )
        self.assertEqual(str(axpy(2 , _lowerCamelCase , _lowerCamelCase ) ) , '(3,4,7)' )
    # copy() returns a vector equal to the original.
    def lowerCAmelCase ( self ):
        __UpperCamelCase : List[str] = Vector([1, 0, 0, 0, 0, 0] )
        __UpperCamelCase : Optional[Any] = x.copy()
        self.assertEqual(str(_lowerCamelCase ) , str(_lowerCamelCase ) )
    # change_component mutates a vector in place.
    def lowerCAmelCase ( self ):
        __UpperCamelCase : List[str] = Vector([1, 0, 0] )
        x.change_component(0 , 0 )
        x.change_component(1 , 1 )
        self.assertEqual(str(_lowerCamelCase ) , '(0,1,0)' )
    # Matrix str() formatting.
    def lowerCAmelCase ( self ):
        __UpperCamelCase : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' , str(_lowerCamelCase ) )
    # Minors of every entry of a 3x3 matrix.
    def lowerCAmelCase ( self ):
        __UpperCamelCase : Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        __UpperCamelCase : List[str] = [[-3, -1_4, -1_0], [-5, -1_0, -5], [-2, -1, 0]]
        for x in range(a.height() ):
            for y in range(a.width() ):
                self.assertEqual(minors[x][y] , a.minor(_lowerCamelCase , _lowerCamelCase ) )
    # Cofactors of every entry of a 3x3 matrix.
    def lowerCAmelCase ( self ):
        __UpperCamelCase : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        __UpperCamelCase : List[str] = [[-3, 1_4, -1_0], [5, -1_0, 5], [-2, 1, 0]]
        for x in range(a.height() ):
            for y in range(a.width() ):
                self.assertEqual(cofactors[x][y] , a.cofactor(_lowerCamelCase , _lowerCamelCase ) )
    # Determinant of a 3x3 matrix.
    def lowerCAmelCase ( self ):
        __UpperCamelCase : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual(-5 , a.determinant() )
    # Matrix-vector product and matrix-scalar product.
    def lowerCAmelCase ( self ):
        __UpperCamelCase : int = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
        __UpperCamelCase : Union[str, Any] = Vector([1, 2, 3] )
        self.assertEqual('(14,32,50)' , str(a * x ) )
        self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' , str(a * 2 ) )
    # change_component on a matrix entry.
    def lowerCAmelCase ( self ):
        __UpperCamelCase : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        a.change_component(0 , 2 , 5 )
        self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' , str(_lowerCamelCase ) )
    # Component access on a matrix.
    # NOTE(review): assertEqual's third positional argument is `msg`;
    # 0.0_1 here suggests assertAlmostEqual was intended.
    def lowerCAmelCase ( self ):
        __UpperCamelCase : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual(7 , a.component(2 , 1 ) , 0.0_1 )
    # Component-wise matrix addition.
    def lowerCAmelCase ( self ):
        __UpperCamelCase : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        __UpperCamelCase : Optional[Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3 )
        self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' , str(a + b ) )
    # Component-wise matrix subtraction.
    def lowerCAmelCase ( self ):
        __UpperCamelCase : Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        __UpperCamelCase : Optional[int] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3 )
        self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' , str(a - b ) )
    # square_zero_matrix factory.
    def lowerCAmelCase ( self ):
        self.assertEqual(
            '|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' , str(square_zero_matrix(5 ) ) , )
| 287 | 0 |
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _A ( __lowercase , __lowercase , unittest.TestCase ):
    """Model tests for ``AutoencoderKL``.

    NOTE(review): the two mixin bases are both written as the same name
    ``__lowercase`` — Python rejects duplicate base classes at class creation
    (the names ``ModelTesterMixin``/``UNetTesterMixin`` imported above appear
    to have been mangled).  The class attributes and every method are also all
    named ``lowercase__``, so later definitions shadow earlier ones, and many
    bodies bind results to ``__snake_case`` while then reading other names
    (``batch_size``, ``model`` ...).  Restore the original unique names before
    relying on these tests.
    """
    # Presumably: model_class / main_input_name / base_precision — TODO confirm.
    lowercase__: Tuple = AutoencoderKL
    lowercase__: int = '''sample'''
    lowercase__: Dict = 1e-2
    @property
    def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
        """Dummy input: a random (4, 3, 32, 32) image batch moved to the test device."""
        __snake_case : Optional[int] = 4
        __snake_case : Optional[Any] = 3
        __snake_case : Optional[Any] = (32, 32)
        __snake_case : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(__magic_name__ )
        return {"sample": image}
    @property
    def lowercase__ ( self : Any ) -> str:
        """Expected input shape (C, H, W)."""
        return (3, 32, 32)
    @property
    def lowercase__ ( self : str ) -> Dict:
        """Expected output shape (C, H, W)."""
        return (3, 32, 32)
    def lowercase__ ( self : Tuple ) -> Union[str, Any]:
        """Init kwargs for a tiny AutoencoderKL plus matching dummy inputs."""
        __snake_case : Union[str, Any] = {
            """block_out_channels""": [32, 64],
            """in_channels""": 3,
            """out_channels""": 3,
            """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
            """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
            """latent_channels""": 4,
        }
        __snake_case : List[Any] = self.dummy_input
        return init_dict, inputs_dict
    def lowercase__ ( self : Union[str, Any] ) -> Optional[int]:
        """Intentionally a no-op (the generic mixin test is disabled here)."""
        pass
    def lowercase__ ( self : Dict ) -> Optional[Any]:
        """Intentionally a no-op (the generic mixin test is disabled here)."""
        pass
    @unittest.skipIf(torch_device == """mps""" , """Gradient checkpointing skipped on MPS""" )
    def lowercase__ ( self : Any ) -> List[Any]:
        """Gradient checkpointing must not change the loss or parameter gradients."""
        __snake_case , __snake_case : List[Any] = self.prepare_init_args_and_inputs_for_common()
        __snake_case : int = self.model_class(**__magic_name__ )
        model.to(__magic_name__ )
        assert not model.is_gradient_checkpointing and model.training
        __snake_case : Any = model(**__magic_name__ ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        __snake_case : List[str] = torch.randn_like(__magic_name__ )
        __snake_case : Dict = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        __snake_case : Optional[int] = self.model_class(**__magic_name__ )
        # clone model
        model_a.load_state_dict(model.state_dict() )
        model_a.to(__magic_name__ )
        model_a.enable_gradient_checkpointing()
        assert model_a.is_gradient_checkpointing and model_a.training
        __snake_case : Dict = model_a(**__magic_name__ ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        __snake_case : Optional[Any] = (out_a - labels).mean()
        loss_a.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1E-5 )
        __snake_case : Any = dict(model.named_parameters() )
        __snake_case : Any = dict(model_a.named_parameters() )
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
    def lowercase__ ( self : int ) -> str:
        """from_pretrained with output_loading_info must report no missing keys."""
        __snake_case , __snake_case : Optional[Any] = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" , output_loading_info=__magic_name__ )
        self.assertIsNotNone(__magic_name__ )
        self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
        model.to(__magic_name__ )
        __snake_case : Tuple = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"
    def lowercase__ ( self : Any ) -> Union[str, Any]:
        """Seeded forward pass of the dummy checkpoint matches per-device golden slices."""
        __snake_case : Any = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" )
        __snake_case : Optional[int] = model.to(__magic_name__ )
        model.eval()
        if torch_device == "mps":
            __snake_case : Optional[Any] = torch.manual_seed(0 )
        else:
            __snake_case : Optional[int] = torch.Generator(device=__magic_name__ ).manual_seed(0 )
        __snake_case : Optional[Any] = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        __snake_case : Any = image.to(__magic_name__ )
        with torch.no_grad():
            __snake_case : Any = model(__magic_name__ , sample_posterior=__magic_name__ , generator=__magic_name__ ).sample
        __snake_case : int = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            __snake_case : Optional[int] = torch.tensor(
                [
                    -4.0_078E-01,
                    -3.8_323E-04,
                    -1.2_681E-01,
                    -1.1_462E-01,
                    2.0_095E-01,
                    1.0_893E-01,
                    -8.8_247E-02,
                    -3.0_361E-01,
                    -9.8_644E-03,
                ] )
        elif torch_device == "cpu":
            __snake_case : List[Any] = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
        else:
            __snake_case : str = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
        self.assertTrue(torch_all_close(__magic_name__ , __magic_name__ , rtol=1E-2 ) )
@slow
class _A ( unittest.TestCase ):
    """Slow integration tests for the Stable Diffusion ``AutoencoderKL``.

    NOTE(review): several method signatures below repeat the parameter name
    ``__magic_name__`` (e.g. the first helper and the parameterized tests),
    which is a SyntaxError — this module cannot be imported as written.  The
    first helper's f-string also reads ``seed`` and ``shape``, names that no
    longer exist after the parameter mangling.  Restore the original
    descriptive parameter names (``seed``, ``shape``, ``fp16``, ``model_id``,
    ``expected_slice`` ...) before running.
    """
    def lowercase__ ( self : str , __magic_name__ : Tuple , __magic_name__ : str ) -> Optional[Any]:
        """Filename of the cached gaussian-noise fixture for (seed, shape)."""
        return f'''gaussian_noise_s={seed}_shape={"_".join([str(__magic_name__ ) for s in shape] )}.npy'''
    def lowercase__ ( self : Tuple ) -> Optional[Any]:
        """Free Python and CUDA memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def lowercase__ ( self : Tuple , __magic_name__ : str=0 , __magic_name__ : Tuple=(4, 3, 5_12, 5_12) , __magic_name__ : Optional[int]=False ) -> Optional[Any]:
        """Load the seeded noise fixture as a tensor on the test device/dtype."""
        __snake_case : int = torch.floataa if fpaa else torch.floataa
        __snake_case : str = torch.from_numpy(load_hf_numpy(self.get_file_format(__magic_name__ , __magic_name__ ) ) ).to(__magic_name__ ).to(__magic_name__ )
        return image
    def lowercase__ ( self : Dict , __magic_name__ : Any="CompVis/stable-diffusion-v1-4" , __magic_name__ : List[Any]=False ) -> Optional[int]:
        """Load the SD VAE subfolder checkpoint, optionally in fp16, in eval mode."""
        __snake_case : List[Any] = """fp16""" if fpaa else None
        __snake_case : Tuple = torch.floataa if fpaa else torch.floataa
        __snake_case : Dict = AutoencoderKL.from_pretrained(
            __magic_name__ , subfolder="""vae""" , torch_dtype=__magic_name__ , revision=__magic_name__ , )
        model.to(__magic_name__ ).eval()
        return model
    def lowercase__ ( self : Tuple , __magic_name__ : Tuple=0 ) -> int:
        """Seeded torch.Generator on the appropriate device (MPS needs manual_seed)."""
        if torch_device == "mps":
            return torch.manual_seed(__magic_name__ )
        return torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ] )
    def lowercase__ ( self : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ) -> Tuple:
        """Stochastic encode/decode round-trip matches golden slices per device."""
        __snake_case : List[str] = self.get_sd_vae_model()
        __snake_case : int = self.get_sd_image(__magic_name__ )
        __snake_case : Tuple = self.get_generator(__magic_name__ )
        with torch.no_grad():
            __snake_case : List[str] = model(__magic_name__ , generator=__magic_name__ , sample_posterior=__magic_name__ ).sample
        assert sample.shape == image.shape
        __snake_case : List[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        __snake_case : str = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
        assert torch_all_close(__magic_name__ , __magic_name__ , atol=3E-3 )
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ] )
    @require_torch_gpu
    def lowercase__ ( self : str , __magic_name__ : Any , __magic_name__ : Union[str, Any] ) -> Optional[Any]:
        """Same round-trip in fp16 on GPU matches its golden slices."""
        __snake_case : List[Any] = self.get_sd_vae_model(fpaa=__magic_name__ )
        __snake_case : int = self.get_sd_image(__magic_name__ , fpaa=__magic_name__ )
        __snake_case : Union[str, Any] = self.get_generator(__magic_name__ )
        with torch.no_grad():
            __snake_case : List[str] = model(__magic_name__ , generator=__magic_name__ , sample_posterior=__magic_name__ ).sample
        assert sample.shape == image.shape
        __snake_case : List[str] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        __snake_case : int = torch.tensor(__magic_name__ )
        assert torch_all_close(__magic_name__ , __magic_name__ , atol=1E-2 )
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ] )
    def lowercase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ) -> Optional[Any]:
        """Deterministic (mode) forward pass matches golden slices per device."""
        __snake_case : int = self.get_sd_vae_model()
        __snake_case : List[Any] = self.get_sd_image(__magic_name__ )
        with torch.no_grad():
            __snake_case : str = model(__magic_name__ ).sample
        assert sample.shape == image.shape
        __snake_case : int = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        __snake_case : Tuple = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
        assert torch_all_close(__magic_name__ , __magic_name__ , atol=3E-3 )
    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ] )
    @require_torch_gpu
    def lowercase__ ( self : List[str] , __magic_name__ : Tuple , __magic_name__ : Dict ) -> Optional[Any]:
        """Decoding a 4x64x64 latent yields 3x512x512 images matching golden slices."""
        __snake_case : List[str] = self.get_sd_vae_model()
        __snake_case : Tuple = self.get_sd_image(__magic_name__ , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            __snake_case : Dict = model.decode(__magic_name__ ).sample
        assert list(sample.shape ) == [3, 3, 5_12, 5_12]
        __snake_case : int = sample[-1, -2:, :2, -2:].flatten().cpu()
        __snake_case : Optional[Any] = torch.tensor(__magic_name__ )
        assert torch_all_close(__magic_name__ , __magic_name__ , atol=1E-3 )
    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ] )
    @require_torch_gpu
    def lowercase__ ( self : Tuple , __magic_name__ : List[Any] , __magic_name__ : Dict ) -> Union[str, Any]:
        """Same latent decoding in fp16 matches its golden slices (looser atol)."""
        __snake_case : Any = self.get_sd_vae_model(fpaa=__magic_name__ )
        __snake_case : Optional[Any] = self.get_sd_image(__magic_name__ , shape=(3, 4, 64, 64) , fpaa=__magic_name__ )
        with torch.no_grad():
            __snake_case : List[str] = model.decode(__magic_name__ ).sample
        assert list(sample.shape ) == [3, 3, 5_12, 5_12]
        __snake_case : str = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        __snake_case : Optional[Any] = torch.tensor(__magic_name__ )
        assert torch_all_close(__magic_name__ , __magic_name__ , atol=5E-3 )
    @parameterized.expand([(13,), (16,), (27,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
    def lowercase__ ( self : Any , __magic_name__ : Tuple ) -> List[str]:
        """fp16 decode with and without xformers attention agrees within atol."""
        __snake_case : List[str] = self.get_sd_vae_model(fpaa=__magic_name__ )
        __snake_case : Optional[int] = self.get_sd_image(__magic_name__ , shape=(3, 4, 64, 64) , fpaa=__magic_name__ )
        with torch.no_grad():
            __snake_case : Any = model.decode(__magic_name__ ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            __snake_case : int = model.decode(__magic_name__ ).sample
        assert list(sample.shape ) == [3, 3, 5_12, 5_12]
        assert torch_all_close(__magic_name__ , __magic_name__ , atol=1E-1 )
    @parameterized.expand([(13,), (16,), (37,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
    def lowercase__ ( self : Dict , __magic_name__ : List[str] ) -> List[str]:
        """fp32 decode with and without xformers attention agrees within atol."""
        __snake_case : Optional[int] = self.get_sd_vae_model()
        __snake_case : Dict = self.get_sd_image(__magic_name__ , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            __snake_case : List[str] = model.decode(__magic_name__ ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            __snake_case : List[Any] = model.decode(__magic_name__ ).sample
        assert list(sample.shape ) == [3, 3, 5_12, 5_12]
        assert torch_all_close(__magic_name__ , __magic_name__ , atol=1E-2 )
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ] )
    def lowercase__ ( self : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : str ) -> Tuple:
        """Encoding produces latents of the expected shape matching golden slices."""
        __snake_case : Union[str, Any] = self.get_sd_vae_model()
        __snake_case : Optional[int] = self.get_sd_image(__magic_name__ )
        __snake_case : List[Any] = self.get_generator(__magic_name__ )
        with torch.no_grad():
            __snake_case : List[Any] = model.encode(__magic_name__ ).latent_dist
            __snake_case : Optional[Any] = dist.sample(generator=__magic_name__ )
        assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        __snake_case : int = sample[0, -1, -3:, -3:].flatten().cpu()
        __snake_case : Tuple = torch.tensor(__magic_name__ )
        __snake_case : Any = 3E-3 if torch_device != """mps""" else 1E-2
        assert torch_all_close(__magic_name__ , __magic_name__ , atol=__magic_name__ )
| 26 |
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging

# NOTE(review): the module logger is bound to the mangled name
# `UpperCamelCase__`, yet the class below calls `logger.info(...)` — a
# NameError at run time.  This was evidently `logger = logging.get_logger(...)`.
UpperCamelCase__ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    """Configuration for a composite encoder-decoder model.

    Wraps two sub-configurations (``encoder`` and ``decoder``) that are
    instantiated via ``AutoConfig`` from the dicts passed in ``kwargs``.

    Fixes over the original: the base class was the undefined name ``_a``;
    the class attributes and every intermediate result were bound to the
    mangled throwaway names ``snake_case``/``UpperCamelCase__`` and then read
    back as undefined names; and both methods shared the name
    ``_lowerCamelCase`` (the second shadowed the first).
    """

    model_type = "encoder-decoder"
    # This config is a composition of two sub-configs, not a flat config.
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        # Imported lazily to avoid a circular import with the auto mapping.
        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        """Build a composite config from two existing sub-configs.

        The decoder config is flipped into decoder mode with cross-attention
        enabled before both configs are serialized into the new instance.
        """
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested encoder/decoder configs to dicts."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 619 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class snake_case_ ( TaskTemplate ):
    """Task template describing a text-classification dataset.

    Maps a raw ``text`` column to model input and a ``labels`` column to a
    ``ClassLabel`` target.

    Fixes over the original: ``frozen=_A`` and the base class ``_A`` were
    undefined names; all five dataclass fields shared the name
    ``lowerCamelCase`` (so ``self.label_column`` etc. never existed); the
    ``isinstance`` check compared against the ``features`` argument instead
    of ``ClassLabel``; and both methods shared the name ``__lowercase``.
    """

    # `task` is not a ClassVar since we want it to be part of the `asdict`
    # output for JSON serialization.
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema matches ``features``.

        Raises:
            ValueError: if the label column is missing or not a ``ClassLabel``.
        """
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so write the new schema through __dict__.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Dataset-column -> canonical-name mapping used by trainers."""
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
| 715 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
snake_case_ = '''
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415
},
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
snake_case_ = '''\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
'''
snake_case_ = '''
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def normalize_answer(s: str) -> str:
    """Lower-case ``s`` and strip punctuation, English articles and extra whitespace.

    Standard SQuAD-style answer normalization used before exact-match
    comparison.  Fixes over the original: the inner helpers took a mangled
    parameter name but read the undefined name ``text`` (NameError), and the
    function itself was one of several definitions all named ``A__`` while
    its caller below invokes ``normalize_answer``.
    """

    def remove_articles(text: str) -> str:
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text: str) -> str:
        return " ".join(text.split())

    def remove_punc(text: str) -> str:
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text: str) -> str:
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def compute_exact(a_gold: str, a_pred: str) -> int:
    """Return 1 if the two answers are equal after normalization, else 0.

    Fixes over the original: both parameters shared the name
    ``SCREAMING_SNAKE_CASE_`` (a SyntaxError) and the function was named
    ``A__`` while the aggregator below calls ``compute_exact``.
    """
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    """Percentage (0-100) of predictions exactly matching at least one reference.

    ``references`` is a list of reference lists, parallel to ``predictions``.
    Fixes over the original: both parameters shared one name (a SyntaxError)
    and the comprehension passed the same mangled name for both arguments of
    ``compute_exact`` instead of (reference, prediction).
    """
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    """Score a single n-gram order for the SARI metric.

    Args:
        sgrams: n-grams of the source sentence.
        cgrams: n-grams of the candidate (predicted) sentence.
        rgramslist: list of n-gram lists, one per reference sentence.
        numref: number of reference sentences.

    Returns:
        Tuple ``(keepscore, delscore_precision, addscore)``.

    Fixes over the original: the four parameters all shared one name (a
    SyntaxError), and the two separate keep/del accumulators had been
    collapsed into a single variable, corrupting both precision and recall.
    """
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    # Scale source/candidate counts by the number of references so they are
    # comparable with the pooled reference counts.
    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref
    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP: n-grams present in both source and candidate.
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter
    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION: n-grams in the source that the candidate dropped.
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 (exact matches should not be penalised).
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION: n-grams the candidate introduced beyond the source.
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def _ngram_variants(tokens):
    """Return ([2-grams], [3-grams], [4-grams]) of `tokens`, space-joined."""
    bigrams = []
    trigrams = []
    fourgrams = []
    for i in range(0, len(tokens) - 1):
        if i < len(tokens) - 1:
            bigrams.append(" ".join(tokens[i : i + 2]))
        if i < len(tokens) - 2:
            trigrams.append(" ".join(tokens[i : i + 3]))
        if i < len(tokens) - 3:
            fourgrams.append(" ".join(tokens[i : i + 4]))
    return bigrams, trigrams, fourgrams


def SARIsent(ssent, csent, rsents):
    """Sentence-level SARI score.

    Args:
        ssent: source sentence (space-tokenized string).
        csent: candidate (simplified) sentence.
        rsents: list of reference sentences.

    Returns:
        float: mean of the keep/deletion/addition scores, each averaged over
        1- to 4-gram orders via ``SARIngram``.
    """
    numref = len(rsents)

    # Token lists and their higher-order n-grams for source and candidate.
    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams, s3grams, s4grams = _ngram_variants(s1grams)
    c2grams, c3grams, c4grams = _ngram_variants(c1grams)

    # One n-gram list per reference, grouped by n-gram order.
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams, r3grams, r4grams = _ngram_variants(r1grams)
        r1gramslist.append(r1grams)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    keep1score, del1score, add1score = SARIngram(s1grams, c1grams, r1gramslist, numref)
    keep2score, del2score, add2score = SARIngram(s2grams, c2grams, r2gramslist, numref)
    keep3score, del3score, add3score = SARIngram(s3grams, c3grams, r3gramslist, numref)
    keep4score, del4score, add4score = SARIngram(s4grams, c4grams, r4gramslist, numref)

    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence: str, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    """Normalize and tokenize a sentence with a sacrebleu/sacremoses tokenizer.

    Args:
        sentence: raw input sentence.
        lowercase: lowercase the sentence before tokenizing.
        tokenizer: one of "13a", "intl" (sacrebleu), "moses", "penn"
            (sacremoses); any other value leaves the sentence untokenized.
        return_str: when False, return the list of whitespace-split tokens
            instead of a single string.
    """
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        # sacrebleu >= 2 moved the tokenizer registry behind a private helper.
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references) -> float:
    """Corpus-level SARI: average of normalized sentence-level SARI, scaled to 0-100.

    Raises:
        ValueError: if the three lists do not have the same length.
    """
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    """Corpus BLEU via sacrebleu; all predictions must have the same number of references.

    Returns:
        float: the sacrebleu corpus BLEU score.

    Raises:
        ValueError: if reference counts differ between predictions.
    """
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    # sacrebleu expects one list per reference "stream", not one list per prediction.
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class snake_case_(datasets.Metric):
    """Metric combining SARI, sacreBLEU and exact match for text simplification/splitting."""

    def _info(self):
        # `datasets.Metric` requires the metadata hook to be named `_info`
        # (the mangled original defined two methods with the same name, so the
        # first one was shadowed).
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        # `datasets.Metric.compute` dispatches to `_compute`.
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
| 262 | 0 |
"""Lazy import structure for the Blenderbot model (standard transformers layout)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Submodule -> public names. Optional-backend branches below *extend* this dict;
# the mangled original rebound the whole mapping (`__A = [...]`) each time,
# destroying the import structure and leaving `_import_structure` undefined.
_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends only load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 134 |
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class snake_case(Trainer):
    """Trainer for extractive question answering.

    Runs prediction with metric computation disabled, post-processes the raw
    start/end logits into answer spans via `post_process_function`, then
    computes metrics on the post-processed predictions.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        # Un-tokenized examples matching `eval_dataset`, needed for post-processing.
        self.eval_examples = eval_examples
        # Maps (examples, features, raw predictions) -> EvalPrediction.
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix="eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            # Restore even if the evaluation loop raises.
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix="test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """
    Greedy activity selection: prints the indices of a maximum-size set of
    mutually compatible activities. Assumes `finish` is sorted ascending
    (the classic greedy precondition).

    >>> start = [1, 3, 0, 5, 8, 5]
    >>> finish = [2, 4, 6, 7, 9, 9]
    >>> print_max_activities(start, finish)
    The following activities are selected:
    0,1,3,4,
    """
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected.
    i = 0
    print(i, end=",")

    # Consider rest of the activities.
    for j in range(n):
        # If this activity has start time greater than or equal to the finish
        # time of the previously selected activity, then select it.
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
| 714 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# NOTE(review): constant names were mangled — both became `lowercase_`, so the
# second assignment shadows the first. Upstream these are MAX_GPU_BATCH_SIZE = 16
# and EVAL_BATCH_SIZE = 32; confirm before relying on either value.
lowercase_ = 16
lowercase_ = 32
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    """
    Creates a pair of `DataLoader`s for the GLUE MRPC dataset.

    Args:
        accelerator: the `Accelerator` object (used to pick TPU-friendly padding).
        batch_size (int): batch size for both train and eval dataloaders.
        model_name (str): tokenizer checkpoint to load.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    """Run one full evaluation pass and return the accuracy.

    Gathers predictions/labels across processes and truncates the duplicated
    samples that distributed samplers add to the final batch.
    """
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)

    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    """Train GLUE/MRPC with Accelerate, checkpointing per epoch.

    Saves accelerator state to `epoch_{n}` and a `state_{n}.json` snapshot of
    accuracy/LR/epoch; when resuming from a checkpoint it verifies those
    values match the snapshot and returns without training.
    """
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer: a DeepSpeed-config-provided optimizer is wrapped
    # by DummyOptim so `accelerator.prepare` can swap in the real one.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler (Dummy when DeepSpeed supplies its own).
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        # Recover the epoch number from the checkpoint directory name "epoch_<n>...".
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    """Parse CLI arguments and launch `training_function`."""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 336 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Submodule -> public names. The optional torch branch *extends* this dict; the
# mangled original rebound the whole mapping, destroying the import structure
# and leaving `_import_structure` undefined at the bottom of the file.
_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]


if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    import sys

    # Replace this module with a lazy proxy so torch only loads on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 0 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """
    Bezier curve: a weighted sum of a set of control points.
    Generates curves from control points; works for 2D coordinates in the
    xy plane only.
    """

    def __init__(self, list_of_points: list[tuple[float, float]]):
        """
        list_of_points: control points in the xy plane that shape the curve.
        """
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        """
        Return the Bernstein basis weights of each control point at time t.
        """
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """
        Return the (x, y) point of the Bezier curve at time t.
        """
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        """
        Plot the curve and its control points with matplotlib, sampling the
        curve every `step_size` in t.
        """
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
| 133 | 0 |
'''simple docstring'''
class a_:
    """
    Disjoint-set (union-find) over sets with pre-assigned sizes, tracking the
    size of the largest set. (Upstream name: ``DisjointSet``.)
    """

    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set,
        rank 1 for every set, and each set as its own parent.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """
        Merge two sets together using union by rank, keeping the size count in
        the surviving root. Returns True on success, False if the two items
        were already in the same set.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """
        Find the root of a given set, compressing the path along the way.
        """
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _SCREAMING_SNAKE_CASE(method):
    """
    Decorator for a model `forward` method that triggers accelerate's
    `_hf_hook.pre_forward` (which moves weights to the right device) before
    the wrapped method runs. No-op when accelerate is unavailable or older
    than 0.17.0. (Upstream name: ``apply_forward_hook``.)
    """
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
'''simple docstring'''
def a__(input_str: str, use_pascal: bool = False) -> str:
    """Convert a snake_case string to camelCase (or PascalCase).

    >>> a__("some_random_string")
    'someRandomString'
    >>> a__("some_random_string", use_pascal=True)
    'SomeRandomString'

    Raises ValueError if input_str is not a str or use_pascal is not a bool.
    (The garbled original duplicated the parameter name — a SyntaxError — and
    called ``isinstance(x, x)``, which always raises TypeError.)
    """
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    # PascalCase capitalizes every word; camelCase keeps the first word as-is.
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    from doctest import testmod
    testmod()
| 71 |
from ...configuration_utils import PretrainedConfig
# Map of pretrained TAPAS checkpoint names to their hosted config.json URLs.
_snake_case = {
    'google/tapas-base-finetuned-sqa': (
        'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
    ),
    'google/tapas-base-finetuned-wtq': (
        'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
    ),
    'google/tapas-base-finetuned-wikisql-supervised': (
        'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
    ),
    'google/tapas-base-finetuned-tabfact': (
        'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
    ),
}
class lowerCAmelCase_(_lowercase):
    """Configuration for TAPAS models.

    Holds the BERT encoder hyperparameters (with table-aware
    max_position_embeddings / type_vocab_sizes) plus the fine-tuning
    hyperparameters for cell selection and aggregation.

    BUG FIX: the garbled original named every constructor parameter
    ``_SCREAMING_SNAKE_CASE`` (a SyntaxError), bound all attributes to a
    throwaway local instead of ``self``, and used the sentinel instead of
    ``k`` when normalizing ``aggregation_labels`` keys.
    """

    UpperCAmelCase__ = "tapas"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        # JSON round-trips turn int keys into strings; normalize them back.
        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
| 383 | 0 |
"""simple docstring"""
def permute(nums: list[int]) -> list[list[int]]:
    """Return all permutations of ``nums`` (rotate-and-recurse approach).

    Named ``permute`` to match its own recursive call site in the original.

    >>> sorted(permute([1, 2, 3]))
    [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]
    """
    result = []
    if len(nums) == 1:
        # Base case: a single element has exactly one permutation.
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        # Permute the rest, then append the removed element to each result.
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)  # restore nums for the next rotation
    return result
def permutea(nums):
    """Return all permutations of ``nums`` via in-place-swap backtracking.

    Named ``permutea`` to match the existing call site in this file's
    ``__main__`` block; the garbled original reused ``lowercase_`` (shadowing
    the rotate-based version) and turned both swaps into dead local bindings.
    """

    def backtrack(start):
        # One position is fixed per recursion level; record a full assignment.
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[start], nums[i] = nums[i], nums[start]
                backtrack(start + 1)
                nums[start], nums[i] = nums[i], nums[start]  # undo the swap

    output = []
    backtrack(0)
    return output
if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    snake_case_ = permutea([1, 2, 3])
    # NOTE(review): `res` is undefined — the result above is bound to
    # `snake_case_` (garbled name); this print raises NameError as written.
    print(res)
    doctest.testmod()
| 711 |
"""simple docstring"""
from functools import reduce

# The 1000-digit number from Project Euler problem 8.
N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Return the greatest product of 13 adjacent digits in ``n``.

    Named ``solution`` and defaulting to ``N`` to match the references the
    garbled original left in place (the function body used ``n`` and the
    ``__main__`` block called ``solution()``).

    >>> solution()
    23514624000
    """
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
| 292 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

# Lazy-import table: submodule name -> public symbols it exposes.
# BUG FIX: the garbled original rebound this name to plain lists, clobbering
# the dict instead of registering the optional-backend entries in it.
__UpperCamelCase = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCamelCase["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    __UpperCamelCase["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCamelCase["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCamelCase["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    # BUG FIX: the garbled original assigned the proxy to a throwaway name
    # instead of installing it into sys.modules.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], __UpperCamelCase)
| 80 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    """Parse the fine-tuning script's command-line arguments.

    Named ``get_args`` to match the call site in ``main``; the garbled
    original passed the accuracy-metric object as every argument's ``type``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_ckpt', type=str, default='microsoft/unixcoder-base-nine')
    parser.add_argument('--num_epochs', type=int, default=5)
    parser.add_argument('--batch_size', type=int, default=6)
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1)
    # NOTE(review): argparse `type=bool` treats any non-empty string as True;
    # kept to match the upstream script's behavior — confirm if stricter
    # parsing is wanted.
    parser.add_argument('--freeze', type=bool, default=True)
    parser.add_argument('--learning_rate', type=float, default=5e-4)
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--lr_scheduler_type', type=str, default='cosine')
    parser.add_argument('--num_warmup_steps', type=int, default=10)
    parser.add_argument('--weight_decay', type=float, default=0.01)
    parser.add_argument('--output_dir', type=str, default='./results')
    return parser.parse_args()
# `evaluate` accuracy metric, loaded once at import time.
snake_case_ = load('accuracy')
def compute_metrics(eval_pred):
    """Compute accuracy from a Trainer ``(logits, labels)`` eval tuple.

    BUG FIX: the garbled original referenced an undefined name ``metric``;
    the module-level accuracy metric is bound to ``snake_case_`` above.
    """
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return snake_case_.compute(predictions=predictions, references=labels)
class CustomCallback(TrainerCallback):
    """Callback that also evaluates on the *training* split after each epoch.

    Results are logged under the ``train`` metric-key prefix so training-set
    and validation-set metrics can be compared epoch by epoch.

    Renamed to ``CustomCallback`` / based on ``TrainerCallback`` (imported
    above) to match the ``trainer.add_callback(CustomCallback(...))`` call
    site; the garbled original never stored the trainer on ``self``, had
    four parameters all named ``a__`` (a SyntaxError), and named the hook
    ``a``, which the Trainer never invokes.
    """

    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            # Copy first: the nested evaluate() call mutates `control`.
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix='train')
            return control_copy
def main():
    """Fine-tune a code model to classify algorithmic complexity (7 classes).

    Named ``main`` to match the ``__main__`` guard's call. Restores the
    assignments the garbled original dropped (``tokenizer.pad_token``,
    ``model.config.pad_token_id``, ``param.requires_grad``) and fixes the
    ``labels.straint`` typo (-> ``labels.str2int``).
    """
    args = get_args()
    set_seed(args.seed)

    # 80/10/10 train/test/valid split of the CodeComplex dataset.
    dataset = load_dataset('codeparrot/codecomplex', split='train')
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test['test'].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            'train': train_test['train'],
            'test': test_validation['train'],
            'valid': test_validation['test'],
        }
    )

    print('Loading tokenizer and model')
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        # Train only the classification head; freeze the transformer body.
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation['train']['complexity'])))

    def tokenize(example):
        # Map source code to model inputs and complexity string to class id.
        inputs = tokenizer(example['src'], truncation=True, max_length=1024)
        label = labels.str2int(example['complexity'])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation['train'].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy='epoch',
        save_strategy='epoch',
        logging_strategy='epoch',
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model='accuracy',
        run_name='complexity-java',
        report_to='wandb',
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets['train'],
        eval_dataset=tokenized_datasets['valid'],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print('Training...')
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
# Script entry point.
if __name__ == "__main__":
    main()
| 592 | 0 |
'''simple docstring'''
def lowercase__(principal, rate_per_annum, years_to_repay) -> float:
    """Return the fixed monthly EMI payment for a loan.

    principal: amount borrowed (> 0)
    rate_per_annum: yearly interest rate as a fraction, e.g. 0.12 for 12% (>= 0)
    years_to_repay: integer repayment term in years (> 0)

    Fixes from the garbled original: the three parameters were all named
    ``__UpperCamelCase`` (a SyntaxError) and the integer check called
    ``isinstance(x, x)``. Also handles a zero interest rate, which the
    validation allows but the annuity formula would turn into 0/0.
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
    if rate_per_month == 0:
        # Interest-free loan: just split the principal evenly.
        return principal / number_of_payments
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
# Run this module's doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 35 |
"""Create a super tiny FSMT model for tests.

This script builds a machinery-test model by shrinking a normal pre-trained
model's config while keeping the full vocab and merges files (~3MB total).
For a ~50x smaller variant see `fsmt-make-super-tiny-model.py`.
It is then used as "stas/tiny-wmt19-en-de".

Fix for the garbled original: every value was rebound to the single name
``SCREAMING_SNAKE_CASE__`` while later lines referenced the real names
(``config``, ``tiny_model``, ``tokenizer``, ``batch``, ``outputs``,
``mname_tiny``); the proper bindings are restored.
"""

# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration

mname = 'facebook/wmt19-en-de'

tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)

config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)

tiny_model = FSMTForConditionalGeneration(config)
print(f'num of params {tiny_model.num_parameters()}')

# Test
batch = tokenizer(['Making tiny model'], return_tensors='pt')
outputs = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))

# Save
mname_tiny = 'tiny-wmt19-en-de'
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'Generated {mname_tiny}')

# Upload
# transformers-cli upload tiny-wmt19-en-de
| 35 | 1 |
"""simple docstring"""
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort ``numbers`` ascending in place by pairwise exchanges; return it.

    Named ``exchange_sort`` to match the ``print(exchange_sort(...))`` call
    in the ``__main__`` block. BUG FIX: the garbled original assigned both
    swap values to a throwaway local, so nothing was ever swapped.

    >>> exchange_sort([3, 1, 2])
    [1, 2, 3]
    """
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
if __name__ == "__main__":
    # NOTE(review): both lines below bind `__A`, but the following code reads
    # `user_input` and `unsorted`, which are never defined (garbled names) —
    # this block raises NameError as written.
    __A : Optional[Any] = input('''Enter numbers separated by a comma:\n''').strip()
    __A : List[str] = [int(item) for item in user_input.split(''',''')]
    print(exchange_sort(unsorted))
| 231 |
"""simple docstring"""
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    """Simulate a quantum full adder on qiskit's Aer simulator.

    Each input is 0, 1, or 2, where 2 places that qubit in superposition via
    a Hadamard gate. Returns the measurement counts of the (sum, carry-out)
    qubits over 1000 shots.

    Named ``quantum_full_adder`` to match the ``__main__`` call site; the
    garbled original had three parameters all named ``__snake_case`` (a
    SyntaxError) and called ``isinstance(x, x)`` in the type check.
    """
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError('inputs must be integers.')

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.')

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.')

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('inputs must be less or equal to 2.')

    # build registers
    qr = qiskit.QuantumRegister(4, 'qr')
    cr = qiskit.ClassicalRegister(2, 'cr')
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend('aer_simulator')
    job = qiskit.execute(quantum_circuit, backend, shots=1_0_0_0)

    return job.result().get_counts(quantum_circuit)
# Demo: add 1 + 1 with carry-in 1 and print the measured state counts.
if __name__ == "__main__":
    print(F"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 231 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase__ ( A , unittest.TestCase ):
    """Tokenizer tests for CLIP (slow and fast implementations).

    NOTE(review): this block is machine-garbled. Every method shares the name
    ``lowerCamelCase__`` (each def shadows the previous one), the five ``__a``
    class attributes rebind a single name (upstream these are distinct
    attributes such as tokenizer_class / rust_tokenizer_class), and several
    bodies reference ``UpperCamelCase`` or other names where no such
    local/parameter exists. Code is kept byte-identical; comments only flag
    the damage.
    """

    __a = CLIPTokenizer
    __a = CLIPTokenizerFast
    __a = True
    __a = {}
    __a = False

    def lowerCamelCase__ ( self : int ):
        """Write a tiny vocab and merges file pair into the test temp dir."""
        super().setUp()

        # fmt: off
        __UpperCAmelCase : int = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
        # fmt: on
        # NOTE(review): `UpperCamelCase` is undefined in this method (garbled);
        # upstream zips the vocab list above with its index range.
        __UpperCAmelCase : int = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
        __UpperCAmelCase : Union[str, Any] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
        __UpperCAmelCase : str = {"""unk_token""": """<unk>"""}

        __UpperCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        __UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(UpperCamelCase ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(UpperCamelCase ) )

    def lowerCamelCase__ ( self : List[Any] , **UpperCamelCase : Any ):
        """Build a slow CLIPTokenizer from the temp-dir files."""
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase )

    def lowerCamelCase__ ( self : Union[str, Any] , **UpperCamelCase : List[Any] ):
        """Build a fast CLIPTokenizerFast from the temp-dir files."""
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase )

    def lowerCamelCase__ ( self : int , UpperCamelCase : Tuple ):
        """Return (input_text, expected_output_text) for round-trip checks."""
        __UpperCAmelCase : Any = """lower newer"""
        __UpperCAmelCase : Union[str, Any] = """lower newer"""
        # NOTE(review): `input_text` / `output_text` are undefined here — the
        # two assignments above were garbled onto a single throwaway name.
        return input_text, output_text

    def lowerCamelCase__ ( self : Tuple ):
        """Tokenize 'lower newer' and check the tokens and their ids."""
        __UpperCAmelCase : Tuple = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        __UpperCAmelCase : List[Any] = """lower newer"""
        __UpperCAmelCase : str = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
        __UpperCAmelCase : Any = tokenizer.tokenize(UpperCamelCase )
        self.assertListEqual(UpperCamelCase , UpperCamelCase )

        __UpperCAmelCase : Union[str, Any] = tokens + [tokenizer.unk_token]
        __UpperCAmelCase : Tuple = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , UpperCamelCase )

    @require_ftfy
    def lowerCamelCase__ ( self : int ):
        """Check slow (ftfy) and fast tokenizers agree on tricky unicode."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                __UpperCAmelCase : int = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
                __UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )

                __UpperCAmelCase : Dict = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
                __UpperCAmelCase : List[Any] = tokenizer_s.tokenize(UpperCamelCase )
                __UpperCAmelCase : str = tokenizer_r.tokenize(UpperCamelCase )
                self.assertListEqual(UpperCamelCase , UpperCamelCase )

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                __UpperCAmelCase : int = """xa\u0303y""" + """ """ + """x\xe3y"""
                __UpperCAmelCase : Tuple = tokenizer_s.tokenize(UpperCamelCase )
                __UpperCAmelCase : str = tokenizer_r.tokenize(UpperCamelCase )
                self.assertListEqual(UpperCamelCase , UpperCamelCase )

                # Test that the tokenization is identical on unicode of space type
                __UpperCAmelCase : List[Any] = [
                    """\u0009""", # (horizontal tab, '\t')
                    """\u000B""", # (vertical tab)
                    """\u000C""", # (form feed)
                    """\u0020""", # (space, ' ')
                    """\u200E""", # (left-to-right mark):w
                    """\u200F""", # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    __UpperCAmelCase : List[str] = tokenizer_s.tokenize(UpperCamelCase )
                    __UpperCAmelCase : str = tokenizer_r.tokenize(UpperCamelCase )
                    self.assertListEqual(UpperCamelCase , UpperCamelCase )

                # Test that the tokenization is identical on unicode of line break type
                __UpperCAmelCase : Optional[Any] = [
                    """\u000A""", # (line feed, '\n')
                    """\r\n""", # (carriage return and line feed, '\r\n')
                    """\u000D""", # (carriage return, '\r')
                    """\r""", # (carriage return, '\r')
                    """\u000D""", # (carriage return, '\r')
                    """\u2028""", # (line separator)
                    """\u2029""", # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).

                for unicode_seq in line_break_unicodes:
                    __UpperCAmelCase : str = tokenizer_s.tokenize(UpperCamelCase )
                    __UpperCAmelCase : str = tokenizer_r.tokenize(UpperCamelCase )
                    self.assertListEqual(UpperCamelCase , UpperCamelCase )

    def lowerCamelCase__ ( self : Any ):
        """Check offset mappings account for leading whitespace."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                __UpperCAmelCase : Any = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
                # NOTE(review): `text_of_1_token` is undefined — the value above
                # was garbled onto a throwaway name.
                __UpperCAmelCase : str = f'''{text_of_1_token} {text_of_1_token}'''

                __UpperCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(
                    UpperCamelCase , use_fast=UpperCamelCase , )
                __UpperCAmelCase : Any = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(UpperCamelCase ) + 1, len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )

                __UpperCAmelCase : Any = f''' {text}'''

                __UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
                    UpperCamelCase , use_fast=UpperCamelCase , )
                __UpperCAmelCase : Optional[Any] = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCamelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(UpperCamelCase ) + 1, 1 + len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )

    def lowerCamelCase__ ( self : str ):
        """Loading an old-format CLIP tokenizer into the fast class must fail."""
        with self.assertRaises(UpperCamelCase ) as context:
            self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )

        self.assertTrue(
            context.exception.args[0].startswith(
                """The `backend_tokenizer` provided does not match the expected format.""" ) )

    @require_ftfy
    def lowerCamelCase__ ( self : Dict ):
        """Re-run the common python/rust equivalence test with ftfy available."""
        super().test_tokenization_python_rust_equals()

    def lowerCamelCase__ ( self : str ):
        """Intentionally empty override (skips the inherited test)."""
        pass
| 299 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
UpperCAmelCase : int = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class lowerCamelCase__(A):
    """Configuration for TAPAS models (duplicate of the class above).

    BERT encoder hyperparameters plus the table-QA fine-tuning knobs for cell
    selection and aggregation.

    BUG FIX: the garbled original named every constructor parameter
    ``UpperCamelCase`` (a SyntaxError), bound the attributes to a throwaway
    local instead of ``self``, and used the sentinel instead of ``k`` when
    normalizing ``aggregation_labels`` keys.
    """

    __a = "tapas"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        # JSON round-trips turn int keys into strings; normalize them back.
        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
| 299 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
# NOTE(review): both assignments below bind the same garbled name, so the
# second (the docstring constant) clobbers the checkpoint list. Upstream
# these are two distinct names (the archive list and `_CONFIG_FOR_DOC`).
__lowerCAmelCase : Any = [
    '''openmmlab/upernet-convnext-tiny''',
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
__lowerCAmelCase : int = '''UperNetConfig'''
class UperNetConvModule(nn.Module):
    """Conv2d + BatchNorm2d + ReLU block used throughout the UperNet head.

    Renamed from the garbled ``UpperCAmelCase_`` to match the
    ``UperNetConvModule(...)`` call site below, and ``forward`` restored so
    the module-call protocol works. BUG FIX: ``nn.Convad`` / ``nn.BatchNormad``
    are not torch APIs (garbled ``2d`` suffixes), and the submodules were
    never assigned to ``self``.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    """One PPM branch: adaptive-average-pool to ``pool_scale``, then 1x1 conv.

    Renamed from the garbled ``UpperCAmelCase_`` to match the
    ``UperNetPyramidPoolingBlock(...)`` call site below. BUG FIX:
    ``nn.AdaptiveAvgPoolad`` is not a torch API (garbled ``2d`` suffix) and
    ``self.layers`` was never assigned, so the iteration below crashed.
    """

    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        # Register each layer as a named submodule so parameters are tracked.
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    """
    Pyramid Pooling Module (PPM) used in PSPNet.

    Args:
        pool_scales: Pooling scales used in the PPM, applied to the input.
        in_channels: Number of input channels.
        channels: Number of output channels per pooling branch.
        align_corners: `align_corners` argument for `interpolate`.
    """

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            # Register under the index so parameters are tracked by nn.Module.
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        """Return one feature map per pooling scale, each upsampled back to x's size."""
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    """
    Decode head for UperNet (Unified Perceptual Parsing). Combines a Pyramid
    Pooling Module (PSP) on the deepest backbone feature map with a Feature
    Pyramid Network (FPN) over the remaining feature maps, and ends in a 1x1
    classifier over `config.num_labels` classes.
    """

    def __init__(self, config, in_channels) -> None:
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module: pools the deepest feature map at several scales.
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module: one lateral 1x1 conv and one 3x3 output conv per level.
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer (handled by PSP)
            lateral_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(lateral_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self) -> None:
        """Initialize all conv weights with the configured normal distribution."""
        self.apply(self._init_weights)

    def _init_weights(self, module) -> None:
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs) -> torch.Tensor:
        """Run the pyramid pooling branch on the deepest feature map."""
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        return self.bottleneck(psp_outs)

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path: add each level's upsampled map into the level below
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])
        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)
        return output
class UperNetFCNHead(nn.Module):
    """
    Fully Convolutional auxiliary head: a stack of conv modules over a single
    backbone feature map, optionally concatenated with its input, followed by
    a 1x1 classifier.

    Args:
        config: Model configuration (reads the `auxiliary_*` fields).
        in_index: Index of the backbone feature map to use.
        kernel_size: Kernel size of the conv stack.
        dilation: Dilation of the conv stack.
    """

    def __init__(
        self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1
    ) -> None:
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = [
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        ]
        for _ in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self) -> None:
        self.apply(self._init_weights)

    def _init_weights(self, module) -> None:
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # Only the feature map at `in_index` feeds the auxiliary head.
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        return self.classifier(output)
class UperNetPreTrainedModel(PreTrainedModel):
    """
    Abstract base handling weight initialization and pretrained-checkpoint
    loading for UperNet models.

    Fixed vs. the original: the class attributes were unnamed obfuscated
    assignments and both `isinstance` checks compared undefined names.
    """

    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module) -> None:
        # Delegate to the sub-modules' own initializers.
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self) -> None:
        """Initialize the weights of backbone, decode head and auxiliary head."""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False) -> None:
        # Only backbone modules support gradient checkpointing here.
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
# Class-level docstring injected by `add_start_docstrings`. Fixed vs. the
# original: both docstrings were assigned to the same obfuscated name even
# though the code below reads `UPERNET_INPUTS_DOCSTRING`; string content is
# unchanged.
UPERNET_START_DOCSTRING = r"""
    Parameters:
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.
    config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

# `forward` argument documentation injected by `add_start_docstrings_to_model_forward`.
UPERNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    """AutoBackbone feature extractor followed by a UperNet decode head and an
    optional FCN auxiliary head for semantic segmentation."""

    def __init__(self, config) -> None:
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        """
        labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
            Ground truth semantic segmentation maps for computing the loss.

        Returns:
            `SemanticSegmenterOutput` or a plain tuple when `return_dict=False`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        # Upsample the logits back to the input resolution.
        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss: main head plus a down-weighted auxiliary term
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
| 529 |
'''simple docstring'''
from maths.prime_check import is_prime
def UpperCAmelCase_(number: int) -> int:
    """
    Return the twin prime of `number`.

    Twin primes differ by 2 (e.g. 3 and 5). If both `number` and `number + 2`
    are prime, return `number + 2`; otherwise return -1.

    Raises:
        TypeError: if `number` is not an int.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    # Fixed: the original bound the argument to a different name while the
    # body referenced `number`, which was therefore undefined.
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 236 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase = MobileBertTokenizer
__UpperCAmelCase = MobileBertTokenizerFast
__UpperCAmelCase = True
__UpperCAmelCase = True
__UpperCAmelCase = filter_non_english
__UpperCAmelCase = """google/mobilebert-uncased"""
def lowercase_ (self ):
'''simple docstring'''
super().setUp()
_UpperCamelCase : List[str] = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_UpperCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
_UpperCamelCase : Optional[Any] = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def lowercase_ (self , lowerCAmelCase__ ):
'''simple docstring'''
_UpperCamelCase : Any = "UNwant\u00E9d,running"
_UpperCamelCase : str = "unwanted, running"
return input_text, output_text
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : str = self.tokenizer_class(self.vocab_file )
_UpperCamelCase : Any = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(lowerCAmelCase__ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [9, 6, 7, 12, 10, 11] )
def lowercase_ (self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_UpperCamelCase : List[str] = self.get_tokenizer()
_UpperCamelCase : Optional[int] = self.get_rust_tokenizer()
_UpperCamelCase : Tuple = "UNwant\u00E9d,running"
_UpperCamelCase : List[Any] = tokenizer.tokenize(lowerCAmelCase__ )
_UpperCamelCase : str = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase : Tuple = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
_UpperCamelCase : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase : List[Any] = self.get_rust_tokenizer()
_UpperCamelCase : List[Any] = tokenizer.encode(lowerCAmelCase__ )
_UpperCamelCase : Optional[int] = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# With lower casing
_UpperCamelCase : Union[str, Any] = self.get_tokenizer(do_lower_case=lowerCAmelCase__ )
_UpperCamelCase : Any = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase__ )
_UpperCamelCase : int = "UNwant\u00E9d,running"
_UpperCamelCase : Optional[int] = tokenizer.tokenize(lowerCAmelCase__ )
_UpperCamelCase : Tuple = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase : List[str] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
_UpperCamelCase : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase : List[Any] = self.get_rust_tokenizer()
_UpperCamelCase : Tuple = tokenizer.encode(lowerCAmelCase__ )
_UpperCamelCase : Optional[Any] = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : List[Any] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : List[str] = BasicTokenizer(do_lower_case=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : str = BasicTokenizer(do_lower_case=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : str = BasicTokenizer(do_lower_case=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : List[str] = BasicTokenizer(do_lower_case=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Any = BasicTokenizer(do_lower_case=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : int = BasicTokenizer(do_lower_case=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Tuple = BasicTokenizer(do_lower_case=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = BasicTokenizer(do_lower_case=lowerCAmelCase__ , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : List[Any] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_UpperCamelCase : Dict = {}
for i, token in enumerate(lowerCAmelCase__ ):
_UpperCamelCase : int = i
_UpperCamelCase : List[str] = WordpieceTokenizer(vocab=lowerCAmelCase__ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def lowercase_ (self ):
'''simple docstring'''
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def lowercase_ (self ):
'''simple docstring'''
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def lowercase_ (self ):
'''simple docstring'''
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = self.get_tokenizer()
_UpperCamelCase : int = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowerCAmelCase__ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
self.assertListEqual(
[rust_tokenizer.tokenize(lowerCAmelCase__ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
@slow
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Any = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" )
_UpperCamelCase : Union[str, Any] = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase__ )
_UpperCamelCase : str = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase__ )
_UpperCamelCase : Tuple = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ )
_UpperCamelCase : Tuple = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ )
assert encoded_sentence == [1_01] + text + [1_02]
assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02]
def lowercase_ (self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_UpperCamelCase : Tuple = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase : List[str] = F"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
_UpperCamelCase : Optional[Any] = tokenizer_r.encode_plus(
lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , )
_UpperCamelCase : Optional[int] = tokenizer_r.do_lower_case if hasattr(lowerCAmelCase__ , "do_lower_case" ) else False
_UpperCamelCase : Optional[int] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : str = ["的", "人", "有"]
_UpperCamelCase : Union[str, Any] = "".join(lowerCAmelCase__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_UpperCamelCase : Optional[Any] = True
_UpperCamelCase : Union[str, Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase : Any = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase : List[str] = tokenizer_p.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
_UpperCamelCase : Tuple = tokenizer_r.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
_UpperCamelCase : Optional[Any] = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase__ )
_UpperCamelCase : Dict = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase : Optional[int] = False
_UpperCamelCase : Tuple = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase : Any = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase : Dict = tokenizer_r.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
_UpperCamelCase : Optional[int] = tokenizer_p.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
_UpperCamelCase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase__ )
_UpperCamelCase : Tuple = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase__ )
# it is expected that only the first Chinese character is not preceded by "##".
_UpperCamelCase : Union[str, Any] = [
F"##{token}" if idx != 0 else token for idx, token in enumerate(lowerCAmelCase__ )
]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
| 239 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import registry: maps submodule name -> list of public names it defines.
# Fixed vs. the original: every assignment overwrote a single obfuscated name,
# and `_import_structure` was passed to `_LazyModule` without ever being defined.
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

# The slow tokenizer requires sentencepiece.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

# The fast tokenizer requires the tokenizers library.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

# Modeling code requires torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 239 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
A : int = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    """Deprecated alias of `CLIPImageProcessor`, kept for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        # Fixed: the original declared `*_snake_case` and `**_snake_case`
        # (duplicate parameter names — a SyntaxError) and passed the argument
        # tuple to `warnings.warn` where the warning category belongs.
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 349 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import registry: maps submodule name -> list of public names it defines.
# Fixed vs. the original: the optional blocks assigned throwaway names instead
# of keys of `_import_structure` (which was used at the bottom but never
# defined), and the TYPE_CHECKING imports referenced nonexistent
# `...layoutlmva` modules/names instead of the `layoutlmv3` ones declared here.
_import_structure = {
    "configuration_layoutlmv3": [
        "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv3Config",
        "LayoutLMv3OnnxConfig",
    ],
    "processing_layoutlmv3": ["LayoutLMv3Processor"],
    "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]

if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 349 | 1 |
"""simple docstring"""
def encrypt(input_string: str, key: int) -> str:
    """
    Rail-fence encrypt: place the characters of `input_string` in a zigzag over
    a grid of `key` rows, then read the rows left to right.

    Fixed vs. the original: both parameters shared one name (a SyntaxError) and
    the body referenced undefined names (`lowest`, `temp_grid`); `decrypt`'s
    callsite below pins the intended real function names.

    Raises:
        ValueError: if `key` is zero or negative.
    """
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string
def decrypt(input_string: str, key: int) -> str:
    """
    Rail-fence decrypt: rebuild the zigzag template for `key` rows, slice the
    ciphertext into those rows, then read the grid back in zigzag order.

    Fixed vs. the original: both parameters shared one name (a SyntaxError) and
    the body referenced undefined names (`grid`, `lowest`, `temp_grid`,
    `counter`).

    Raises:
        ValueError: if `key` is zero or negative.
    """
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string
def __magic_name__(input_string: str) -> dict:
    """Brute-force a rail-fence ciphertext: map each candidate key to its decryption.

    NOTE(review): the original discarded every decryption into a shadowed
    placeholder, returned the undefined name ``results``, and passed the
    ciphertext itself as the key.  Also, ``decrypt`` is not defined under that
    name in this file (the decrypt function above is also named
    ``__magic_name__``) -- confirm the intended callee.
    """
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 63 |
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    # NOTE(review): both class attributes below are bound to the same
    # placeholder name, so the second assignment shadows the first; the
    # original pipeline-output dataclass presumably declared two distinct
    # annotated fields (e.g. the generated images and a per-image flag) --
    # confirm against the original definition before relying on this class.
    _lowerCAmelCase = 42
    _lowerCAmelCase = 42
if is_transformers_available() and is_torch_available():
    # Only import the pipeline when its heavy optional dependencies are installed.
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 63 | 1 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
_snake_case : Optional[Any] = get_logger(__name__)
_snake_case : Optional[int] = R"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class __SCREAMING_SNAKE_CASE :
    """Abstract base class for all logits processors applied during generation."""

    # NOTE(review): the decorator argument was the undefined name `_a`;
    # `_snake_case` (the call docstring defined above) is the apparent intent
    # -- confirm.  The duplicate placeholder parameter names (a SyntaxError in
    # the original) were replaced with `input_ids` / `scores`.
    @add_start_docstrings(_snake_case)
    def __call__(self, input_ids, scores) -> jnp.ndarray:
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )


class __SCREAMING_SNAKE_CASE :
    """Abstract base class for all logits warpers applied during sampling.

    NOTE(review): this class re-uses the same (obfuscated) name as the class
    above and therefore shadows it; the two were presumably distinct base
    classes (processor vs. warper) -- confirm the intended names.
    """

    @add_start_docstrings(_snake_case)
    def __call__(self, input_ids, scores) -> jnp.ndarray:
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class __SCREAMING_SNAKE_CASE ( _A ):
    """A list of logits processors, applied to the scores in order.

    Processors whose ``__call__`` takes extra arguments beyond
    ``(input_ids, scores, cur_len)`` must receive them all via ``kwargs``.
    """

    @add_start_docstrings(_snake_case)  # NOTE(review): decorator arg was the undefined `_a`; confirm
    def __call__(self, input_ids, scores, cur_len, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                # Extra-argument processors: validate that every required kwarg was supplied.
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
                        f'''{processor.__class__} are passed to the logits processor.''' )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class __SCREAMING_SNAKE_CASE ( _A ):
    """Logits warper that rescales the scores by ``1 / temperature``."""

    def __init__(self, temperature):
        # NOTE(review): the original checked `isinstance(_a, _a)` with the
        # placeholder as both arguments; a strictly-positive float is the intent.
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' )
        self.temperature = temperature

    def __call__(self, input_ids, scores, cur_len) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class __SCREAMING_SNAKE_CASE ( _A ):
    """Top-p (nucleus) logits warper: keep the smallest token set whose cumulative
    probability exceeds ``top_p``; all other scores are set to ``filter_value``.

    NOTE(review): the original collapsed the tuple unpacking of ``lax.top_k``
    into a single placeholder and lost the boolean set on the mask; the
    reconstruction below follows the standard nucleus-sampling formulation.
    """

    def __init__(self, top_p, filter_value=-float("Inf"), min_tokens_to_keep=1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids, scores, cur_len) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])
        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)
        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)
        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        # Undo the sort so scores are returned in vocabulary order.
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]
        return next_scores
class __SCREAMING_SNAKE_CASE ( _A ):
    """Top-k logits warper: keep the ``top_k`` highest scores per row and set
    every other score to ``filter_value``."""

    def __init__(self, top_k, filter_value=-float("Inf"), min_tokens_to_keep=1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids, scores, cur_len) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)
        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        # Flatten the batch so the kept entries can be scattered in one pass.
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift
        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class __SCREAMING_SNAKE_CASE ( _A ):
    """Force ``bos_token_id`` to be the very first generated token (cur_len == 1)."""

    def __init__(self, bos_token_id):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids, scores, cur_len) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))
        # apply_penalty is 1 only when cur_len == 1 (XLA-friendly branchless form).
        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)
        return scores


class __SCREAMING_SNAKE_CASE ( _A ):
    """Force ``eos_token_id`` as the last token when ``max_length`` is reached.

    NOTE(review): this class re-uses the same (obfuscated) name as the class
    above and therefore shadows it; they were presumably the forced-BOS and
    forced-EOS processors -- confirm the intended names.
    """

    def __init__(self, max_length, eos_token_id):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids, scores, cur_len) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))
        # apply_penalty is 1 only on the step that completes max_length.
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)
        return scores
class __SCREAMING_SNAKE_CASE ( _A ):
    """Enforce a minimum generation length by masking EOS before ``min_length``."""

    def __init__(self, min_length, eos_token_id):
        # NOTE(review): the original validated with `isinstance(_a, _a)`;
        # non-negative-int checks are the evident intent.
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' )
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids, scores, cur_len) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)
        return scores
class __SCREAMING_SNAKE_CASE ( _A ):
    """Suppress ``begin_suppress_tokens`` while generation is still at ``begin_index``."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len):
        # apply_penalty is 1 only when cur_len == begin_index.
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)
        return scores


class __SCREAMING_SNAKE_CASE ( _A ):
    """Unconditionally suppress a fixed list of token ids at every step.

    NOTE(review): this class re-uses the same (obfuscated) name as the class
    above and therefore shadows it -- confirm the intended distinct names.
    """

    def __init__(self, suppress_tokens):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids, scores, cur_len) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))
        return scores
class __SCREAMING_SNAKE_CASE ( _A ):
    """Force specific tokens at specific generation indices (``{index: token}``)."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        # NOTE(review): the original dtype name `jnp.intaa` does not exist;
        # int32 is assumed -- confirm against the original source.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids, scores, cur_len) -> jnp.ndarray:
        def _force_token(generation_idx):
            # Build a score row that only allows the forced token.
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]
            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            # Past the end of the force map: processor is idle.
            cur_len >= self.force_token_array.shape[0],
            lambda: scores,
            lambda: lax.cond(
                # Negative entries mean "no forced token at this index".
                self.force_token_array[cur_len] >= 0,
                lambda: _force_token(cur_len),
                lambda: scores,
            ),
        )
        return scores
class __SCREAMING_SNAKE_CASE ( _A ):
    """Whisper-style processor constraining timestamp-token predictions.

    NOTE(review): the original ``__init__`` declared all three parameters under
    one placeholder name (a SyntaxError) and several boolean arguments of
    ``jnp.where`` were lost; the reconstruction below follows the standard
    Whisper timestamp rules -- confirm against the original implementation.
    """

    def __init__(self, generate_config, model_config, decoder_input_length):
        # Parameter names reconstructed from the attribute reads below
        # (generate_config.eos_token_id, model_config.vocab_size, ...).
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            # Timestamps must come in pairs; after a lone timestamp only
            # non-timestamp tokens (or EOS) are allowed.
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin, True and last_was_timestamp, False
            )
            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin, True, penultimate_was_timestamp
            )
            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        # Cap the very first timestamp at max_initial_timestamp_index.
        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None, True and apply_max_initial_timestamp, False
        )
        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp, scores.at[:, last_allowed + 1 :].set(-float("inf")), scores
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)
        return scores
| 693 |
from __future__ import annotations
def lowercase_(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of *nums* (0 for empty input).

    Classic house-robber DP: track the best sum including the current element
    and the best sum excluding it.

    NOTE(review): the original declared the parameter as `__snake_case` while
    the body read `nums`, and the arguments of `max` were obliterated; both
    are restored here.
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
    # Run the module doctests when executed directly.
    # NOTE(review): the original final line had stray table tokens
    # ("| 241 | 0 |") fused onto it, which is a SyntaxError.
    import doctest

    doctest.testmod()
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _lowercase ( lowerCamelCase__ ) -> int:
"""simple docstring"""
for param in module.parameters():
__UpperCAmelCase : List[Any] = False
def _lowercase ( ) -> str:
"""simple docstring"""
__UpperCAmelCase : Optional[int] = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
__UpperCAmelCase : List[str] = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
def _lowercase(image) -> None:
    """Display *image* with matplotlib, hiding both axes.

    NOTE(review): the original never bound the AxesImage to ``fig`` and passed
    the input image itself to ``set_visible``; the intent is evidently to hide
    both axes (``set_visible(False)``) -- confirm.
    """
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()
def _lowercase ( ) -> Dict:
"""simple docstring"""
__UpperCAmelCase : Optional[int] = datetime.now()
__UpperCAmelCase : int = current_time.strftime("%H:%M:%S" )
return timestamp
| 716 | '''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : List[str] = logging.get_logger(__name__)
# NOTE(review): the dictionary below re-binds `_a`, shadowing the logger
# created on the previous line; these were presumably two distinct module
# names (a logger and a pretrained-config archive map) -- confirm against
# the original source.
_a : Any = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}
class __A (__magic_name__ ):
    """Configuration class for an I-BERT model (integer-quantized RoBERTa variant).

    NOTE(review): the original ``__init__`` declared every parameter under the
    same placeholder name (a SyntaxError); the parameter names below are
    reconstructed from the right-hand sides of the attribute assignments in
    the body (``vocab_size``, ``hidden_size``, ...).
    """

    # NOTE(review): this attribute was bound to the placeholder `snake_case`;
    # `model_type` is the conventional config attribute name -- confirm.
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class __A (__magic_name__ ):
    """ONNX export configuration: declares the dynamic axes of the model inputs.

    NOTE(review): this class re-uses the name of the config class above and
    shadows it; the original was presumably a distinct OnnxConfig subclass.
    The last line of the original also had stray table tokens fused onto it.
    """

    @property
    def _snake_case(self):
        # Multiple-choice inputs carry an extra `choice` axis.
        # NOTE(review): the original bound the axis map to a throwaway
        # placeholder while the return statement read `dynamic_axis`.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
UpperCAmelCase = (3, 9, -11, 0, 7, 5, 1, -1)
UpperCAmelCase = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    """One element of a singly linked list.

    NOTE(review): the original bound both fields to the same placeholder name,
    and the class itself was named identically to the list class below (which
    immediately shadowed it).  The name ``Node`` and the field names are
    reconstructed from their uses in the list class (``Node(value, self.head)``,
    ``node.data``, ``node.next_node``).
    """

    data: int
    next_node: Node | None
class lowercase__ :
    """A singly linked list that stores the given integers in ascending order.

    NOTE(review): in the original, the head was assigned to throwaway
    placeholders (so ``self.head`` was never set), ``reverse=`` was passed the
    input iterable itself, and ``__str__`` referenced an undefined name; all
    are restored from their evident intent.
    """

    def __init__(self, ints) -> None:
        self.head: Node | None = None
        # Inserting at the head in descending order leaves the list ascending.
        for value in sorted(ints, reverse=True):
            self.head = Node(value, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])
def _snake_case(sll_one: "SortedLinkedList", sll_two: "SortedLinkedList"):
    """Merge two sorted linked lists into a single, newly-built sorted list.

    NOTE(review): the original declared both parameters under the same
    placeholder name (a SyntaxError), and ``SortedLinkedList`` is not defined
    under that name in this file (the class above is named ``lowercase__``)
    -- confirm the intended alias before running.
    """
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
    # Run the doctests, then demo merging the two sample integer tuples.
    import doctest
    doctest.testmod()
    # NOTE(review): `SSL` is never defined -- the alias below is bound to
    # `UpperCAmelCase` instead -- and `SortedLinkedList` / `merge_lists` do not
    # exist under those names in this file (the class is `lowercase__`, the
    # merge function `_snake_case`); confirm the intended names before running.
    UpperCAmelCase = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 88 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def lowerCAmelCase_(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    """Load CSV splits, tokenize them, and build ``tf.data`` datasets.

    Returns ``(train_ds, val_ds, test_ds, labelaid)`` where ``labelaid`` maps
    each label value to its integer id.

    NOTE(review): the original declared all six parameters under one
    placeholder name (a SyntaxError) and lost every assignment target
    (``files``, ``ds``, ``labelaid``, ...); the names below are reconstructed
    from their reads in the body -- confirm against the original script.
    """
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    labelaid = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    # One remaining feature column -> single-sentence task; two -> sentence pair.
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)

    # NOTE(review): the original dtypes were corrupted to the nonexistent
    # `tf.intaa`; int32 inputs with int64 labels assumed -- confirm.
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, labelaid
# NOTE(review): the logger was bound to the placeholder `UpperCamelCase__`
# while the training entry point reads `logger`; renamed accordingly.
logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    """Arguments describing the CSV data the model is trained and evaluated on.

    NOTE(review): the original declared every field under the same placeholder
    name `_A` (so only the last survived) and used the undefined name
    `lowerCamelCase__` as a default value; the field names below are
    reconstructed from their uses in the entry point (``data_args.train_file``,
    ...) and the class is renamed to the name referenced by
    ``HfArgumentParser`` there -- confirm.
    """

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: Optional[str] = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    """Arguments describing which model/config/tokenizer to fine-tune from.

    NOTE(review): the original declared every field under the same placeholder
    name `_A` with defaults bound to the undefined `lowerCamelCase__`; the
    field names below are reconstructed from their uses in the entry point
    (``model_args.model_name_or_path``, ...) and the class is renamed to the
    name referenced by ``HfArgumentParser`` there -- confirm.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def lowerCAmelCase_():
    """Entry point: parse CLI args, build datasets, fine-tune and evaluate a
    TF sequence-classification model; returns the evaluation results dict.

    NOTE(review): the original unpacked ``parse_args_into_dataclasses`` into
    three identically-named placeholders and lost most local names; they are
    reconstructed from their reads in the body (``training_args``, ``trainer``,
    ...) -- confirm against the original script.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fpaa}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    # NOTE(review): the dataset helper above is also (mis)named `lowerCAmelCase_`;
    # `get_tfds` does not exist under that name in this file -- confirm.
    train_dataset, eval_dataset, test_ds, labelaid = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(labelaid),
        # NOTE(review): kwargs were corrupted to `labelaid=` / `idalabel=`;
        # the AutoConfig keywords are `label2id` / `id2label` -- confirm.
        label2id=labelaid,
        id2label={id: label for label, id in labelaid.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f" {key} = {value}")
                writer.write(f"{key} = {value}\n")
        results.update(result)
    return results
if __name__ == "__main__":
    # NOTE(review): the original called the undefined name `main` and carried
    # stray table tokens fused onto the line; the training entry point above is
    # (mis)named `lowerCAmelCase_`, so call it directly -- confirm.
    lowerCAmelCase_()
"""simple docstring"""
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
a : Optional[List[str]] = None
a : Dict = """<""" if sys.byteorder == """little""" else """>"""
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
a : Optional[Any] = [
np.dtype("""|b1"""),
np.dtype("""|u1"""),
np.dtype("""<u2"""),
np.dtype(""">u2"""),
np.dtype("""<i2"""),
np.dtype(""">i2"""),
np.dtype("""<u4"""),
np.dtype(""">u4"""),
np.dtype("""<i4"""),
np.dtype(""">i4"""),
np.dtype("""<f4"""),
np.dtype(""">f4"""),
np.dtype("""<f8"""),
np.dtype(""">f8"""),
]
@dataclass
class __UpperCAmelCase:
    """Image feature: stores images as a ``{"bytes", "path"}`` struct and decodes
    them back to PIL images.

    NOTE(review): the original bound every dataclass field to one placeholder
    name and defined five methods all under the same name (each shadowing the
    previous); field names are reconstructed from their reads in the methods
    (``self.decode``, ``self.pa_type``, ...) and canonical method names are
    restored -- confirm all call sites.
    """

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value):
        """Encode *value* (str path, raw bytes, list, numpy array, PIL image or
        storage dict) into the ``{"bytes", "path"}`` storage dict."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")
        if isinstance(value, list):
            value = np.array(value)
        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value, token_per_repo_id=None):
        """Decode a storage dict back into a loaded PIL image; remote paths are
        fetched with an optional per-repo auth token."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self):
        """Flattened representation: the struct's columns when decoding is off."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage):
        """Cast a string/binary/struct/list arrow array to the image struct type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=storage.is_null()
            )
        elif pa.types.is_list(storage.type):
            # Lists are treated as raw arrays and re-encoded to image bytes.
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage):
        """Read every referenced file's bytes into storage, keeping only basenames."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() ->List[str]:
    """Return (computing once and caching) the image formats PIL can both open and save.

    Review fixes: restored the real function name — this file calls
    ``list_image_compression_formats()`` (see ``image_to_bytes``) — and fixed
    the cache assignment: the result was previously written to a throwaway
    local, so the module-level cache stayed ``None`` and the function
    returned ``None``.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        # Formats supported for BOTH reading and writing.
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image) ->bytes:
    """Serialize a PIL image to raw bytes.

    Keeps the image's original format when PIL can both read and write it;
    otherwise falls back to PNG for simple modes and TIFF for exotic ones.

    Review fixes: restored the real function name (called at the end of the
    PIL-encoding helper below) and the locals — the body read the undefined
    names ``image`` and ``buffer`` and passed the image itself as ``format``.
    """
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        fmt = image.format
    else:
        fmt = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=fmt)
    return buffer.getvalue()
def encode_pil_image(image) ->dict:
    """Encode a PIL image either by reference (its on-disk filename, when it
    has one) or by value (serialized bytes).

    Review fixes: the parameter was named ``A`` while the body read the
    undefined name ``image``; the function name is restored to the
    conventional ``encode_*`` form matching ``encode_np_array`` used elsewhere
    in this file.
    """
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array) ->dict:
    """Encode a numpy array as an image dict, downcasting the dtype when
    necessary so Pillow can handle it.

    Review fixes: restored the real function name — this file calls
    ``encode_np_array(...)`` when casting list storage — and the locals: the
    body read the undefined name ``array``, and the downcast loop built the
    candidate dtype from ``str(A)`` / ``np.dtype(A)`` instead of the itemsize
    and candidate string.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
        if dtype is not dest_dtype:
            warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
                break
            else:
                dtype_itemsize //= 2
    if dest_dtype is None:
        raise TypeError(
            f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(objs) ->List[dict]:
    """Encode a list of image-like objects (paths, numpy arrays, or PIL
    images) into a list of ``{"path", "bytes"}`` dicts, dispatching on the
    type of the first non-null element.

    Review fixes: the body read the undefined parameter alias and tested
    ``isinstance(A, A)``; the first non-null value and the per-item encoder
    are now bound to real locals. The encoder targets are the sibling
    helpers ``encode_np_array`` / ``encode_pil_image`` — confirm the names
    against the module's public API.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
| 85 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a : List[str] = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
a : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 85 | 1 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase_ ( TokenizerTesterMixin , unittest.TestCase ):
    """Unit tests for ``BioGptTokenizer``.

    Review fixes: the base class was the undefined name ``A`` (the otherwise
    unused ``TokenizerTesterMixin`` import is the intended mixin); the two
    class attributes shared one name and are restored to the names the mixin
    reads; all four methods shared one name (each shadowing the previous) and
    referenced undefined locals — they are restored to the ``unittest`` /
    mixin names required for the tests to be collected and run at all.
    """

    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        # Same text is expected back after a tokenize/detokenize round trip.
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """BPE-tokenize a word and check both tokens and ids, including <unk>."""
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        """build_inputs_with_special_tokens prepends the </s> token (id 2)."""
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a)
| 443 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase_ ( DiffusionPipeline ):
    """Stochastic sampling pipeline (Karras et al. 2022, [1] in the inline
    comments) for unconditional image generation.

    Review fixes: the base class was the undefined name ``A`` (the imported
    ``DiffusionPipeline`` is the intended base); ``__init__`` and ``__call__``
    had duplicated parameter names — a SyntaxError — and the body referenced
    locals that were never bound. Names are restored from the registered
    modules and the scheduler API used in the body.
    """

    # Registered pipeline modules (type annotations were scrambled to `42`).
    unet: UNetaDModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size = 1,
        num_inference_steps = 50,
        generator = None,
        output_type = "pil",
        return_dict = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL.
        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 443 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class lowerCamelCase ( unittest.TestCase ):
    """PT <-> TF cross-framework loading tests for the TF Auto* classes.

    Review fixes: every method referenced the undefined name
    ``UpperCAmelCase__`` (NameError on first call); all methods shared a
    single name, so only the last definition was ever collected; the two
    identifier tests lost their model identifiers. Locals, expected classes
    and distinct ``test_*`` names are restored from this file's imports
    (``SMALL_MODEL_IDENTIFIER``, ``DUMMY_UNKNOWN_IDENTIFIER`` and the
    TF/PT model classes were imported but otherwise unused).
    """

    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPTaConfig)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPTaLMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPTaLMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TaConfig)

            model = TFAutoModelForSeqaSeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeqaSeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTaForConditionalGeneration)

            model = AutoModelForSeqaSeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeqaSeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TaForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
# Review fix: this must be bound to the magic name `pytestmark` for the mark
# to apply module-wide; a throwaway local had no effect.
pytestmark = pytest.mark.integration


@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def __lowerCamelCase ( path , tmp_path ):
    """`inspect_dataset` copies the dataset script (without caches) to the target dir.

    Review fixes: the two parameters shared the name ``_UpperCamelCase`` — a
    SyntaxError — and pytest binds arguments by name, so they are restored to
    the parametrized ``path`` and the ``tmp_path`` fixture.
    NOTE(review): pytest only collects ``test_*`` functions; the scrambled
    ``__lowerCamelCase`` names in this file shadow each other — confirm the
    intended test names.
    """
    inspect_dataset(path, tmp_path)
    script_name = path + '''.py'''
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def __lowerCamelCase ( path , tmp_path ):
    """`inspect_metric` copies the metric script (without caches) to the target dir.

    Review fix: duplicated ``_UpperCamelCase`` parameters (SyntaxError)
    restored to the parametrized ``path`` plus the ``tmp_path`` fixture.
    """
    inspect_metric(path, tmp_path)
    script_name = path + '''.py'''
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
    '''path, config_name, expected_splits''' , [
        ('''squad''', '''plain_text''', ['''train''', '''validation''']),
        ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
        ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
    ] , )
def __lowerCamelCase ( path , config_name , expected_splits ):
    """`get_dataset_config_info` reports the config name and its splits.

    Review fix: duplicated ``_UpperCamelCase`` parameters (SyntaxError)
    restored to the names declared in the parametrize decorator.
    """
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    '''path, config_name, expected_exception''' , [
        ('''paws''', None, ValueError),
    ] , )
def __lowerCamelCase ( path , config_name , expected_exception ):
    """A missing config name raises the expected exception.

    Review fix: duplicated ``_UpperCamelCase`` parameters (SyntaxError)
    restored to the names declared in the parametrize decorator.
    """
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
    '''path, expected''' , [
        ('''squad''', '''plain_text'''),
        ('''acronym_identification''', '''default'''),
        ('''lhoestq/squad''', '''plain_text'''),
        ('''lhoestq/test''', '''default'''),
        ('''lhoestq/demo1''', '''lhoestq--demo1'''),
        ('''dalle-mini/wit''', '''dalle-mini--wit'''),
    ] , )
def __lowerCamelCase ( path , expected ):
    """`get_dataset_config_names` includes the expected config for each path.

    Review fix: duplicated ``_UpperCamelCase`` parameters (SyntaxError)
    restored to the names declared in the parametrize decorator.
    """
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
    '''path, expected_configs, expected_splits_in_first_config''' , [
        ('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
        ('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
        ('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
    ] , )
def __lowerCamelCase ( path , expected_configs , expected_splits_in_first_config ):
    """`get_dataset_infos` returns one info per config, keyed by config name.

    Review fix: duplicated ``_UpperCamelCase`` parameters (SyntaxError)
    restored to the names declared in the parametrize decorator.
    """
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
    '''path, expected_config, expected_splits''' , [
        ('''squad''', '''plain_text''', ['''train''', '''validation''']),
        ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
        ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
    ] , )
def __lowerCamelCase ( path , expected_config , expected_splits ):
    """The expected config's info carries the expected split names.

    Review fix: duplicated ``_UpperCamelCase`` parameters (SyntaxError)
    restored to the names declared in the parametrize decorator.
    """
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    '''path, config_name, expected_exception''' , [
        ('''paws''', None, ValueError),
    ] , )
def __lowerCamelCase ( path , config_name , expected_exception ):
    """`get_dataset_split_names` without a config name raises the expected exception.

    Review fix: duplicated ``_UpperCamelCase`` parameters (SyntaxError)
    restored to the names declared in the parametrize decorator.
    """
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 43 | 1 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
# Review fix: the logger and all three vocabulary constants were bound to the
# single name `A` (each assignment overwriting the previous), while the class
# below reads `logger`, `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_LYRIC_TOKENS_SIZES` — restored those names.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    'artists_file': 'artists.json',
    'lyrics_file': 'lyrics.json',
    'genres_file': 'genres.json',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'artists_file': {
        'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
    },
    'genres_file': {
        'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
    },
    'lyrics_file': {
        'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
    },
}

PRETRAINED_LYRIC_TOKENS_SIZES = {
    'jukebox': 5_1_2,
}
class A ( PreTrainedTokenizer ):
    '''
    Jukebox tokenizer: converts an (artist, genres, lyrics) triplet into
    token-id lists, one per decoding level.

    Review fixes: the base class was the undefined name ``UpperCamelCase_``
    (``PreTrainedTokenizer`` is the imported base whose API this class
    implements); the four class attributes shared one name; most methods had
    duplicated parameter names (SyntaxErrors), shared a single method name
    (shadowing each other), and dropped results into throwaway locals.
    Parameter/attribute/method names are restored from the ``self.*`` call
    sites inside this class's own bodies.
    '''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_lyric_tokens_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]

    def __init__(
        self,
        artists_file,
        genres_file,
        lyrics_file,
        version=["v3", "v2", "v2"],
        max_n_lyric_tokens=512,
        n_genres=5,
        unk_token="<|endoftext|>",
        **kwargs,
    ):
        """Load the artist/genre/lyrics vocabularies and build their decoders."""
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token,
            n_genres=n_genres,
            version=version,
            max_n_lyric_tokens=max_n_lyric_tokens,
            **kwargs,
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres

        with open(artists_file, encoding="""utf-8""") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)
        with open(genres_file, encoding="""utf-8""") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)
        with open(lyrics_file, encoding="""utf-8""") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)

        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r"""\-'""", r"""\-+'""")
        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}

    @property
    def vocab_size(self):
        # Total size across the three sub-vocabularies.
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def get_vocab(self):
        # NOTE(review): the original called dict() with three positional dicts
        # (a TypeError); merging the three encoders is the presumable intent —
        # confirm against upstream.
        return dict(self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder)

    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        """Map artist names, genre lists and lyric characters to ids (0 = OOV)."""
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            # Pad each per-level genre list to n_genres with -1.
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
        # Only the first (top) level carries lyric tokens.
        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def _tokenize(self, lyrics):
        # Character-level tokenization of the lyrics.
        return list(lyrics)

    def tokenize(self, artist, genre, lyrics, **kwargs):
        """Normalize then character-tokenize the (artist, genre, lyrics) triplet."""
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics

    def prepare_for_tokenization(self, artists, genres, lyrics, is_split_into_words=False):
        """Version-dependent normalization of artists/genres and lyric cleanup."""
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("""_""")
                ]  # split is for the full dictionary with combined genres

        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(r"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""")

        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("""\\""", """\n""")
        # Lyrics only on the first level; the other two levels carry none.
        lyrics = self.out_of_vocab.sub("""""", lyrics), [], []
        return artists, genres, lyrics

    def _run_strip_accents(self, text):
        """Strip accents (Unicode combining marks) from a piece of text."""
        text = unicodedata.normalize("""NFD""", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _normalize(self, text):
        """Lowercase and map any char outside [a-z0-9.] to '_', collapsing runs."""
        accepted = (
            [chr(i) for i in range(ord("""a"""), ord("""z""") + 1)]
            + [chr(i) for i in range(ord("""A"""), ord("""Z""") + 1)]
            + [chr(i) for i in range(ord("""0"""), ord("""9""") + 1)]
            + ["."]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(r"""_+""")
        text = "".join([c if c in accepted else """_""" for c in text.lower()])
        text = pattern.sub("""_""", text).strip("""_""")
        return text

    def convert_lyric_tokens_to_string(self, lyrics):
        return " ".join(lyrics)

    def convert_to_tensors(self, inputs, tensor_type=None, prepend_batch_axis=False):
        """Convert `inputs` to the requested tensor framework (tf/pt/jax/np)."""
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    """Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""" )
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""" )
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""" )
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                """Unable to create tensor, you should probably activate truncation and/or padding """
                """with 'padding=True' 'truncation=True' to have batched tensors with the same length.""" )
        return inputs

    def __call__(self, artist, genres, lyrics="", return_tensors="pt"):
        """Tokenize the triplet and return a BatchEncoding with per-level ids."""
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)
        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)
        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors
            )
            for i in range(len(self.version))
        ]
        return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} )

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the three vocab JSON files to `save_directory`; return their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        artists_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""] )
        with open(artists_file, """w""", encoding="""utf-8""") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))

        genres_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""] )
        with open(genres_file, """w""", encoding="""utf-8""") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))

        lyrics_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""] )
        with open(lyrics_file, """w""", encoding="""utf-8""") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))
        return (artists_file, genres_file, lyrics_file)

    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        """Inverse mapping: ids back to artist name, genre names and lyric chars."""
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
| 15 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    """Builds tiny OpenAI-GPT configs/inputs for the common model test suite.

    Restored from a mangled revision: every `__init__` parameter was named
    `a__` (duplicate arguments are a SyntaxError) and all attributes/locals
    were assigned to throwaway names instead of `self.*`. The class name and
    method names match what the test class below already references.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # Last vocab id doubles as the padding token for these tests.
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        """Return a small config plus random input tensors/labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        """Forward the base model with/without optional inputs and check shapes."""
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        """Check LM-head loss/logits shapes (labels = input_ids)."""
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        """Check double-heads model loss/logits shapes."""
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        """Check the sequence-classification head output shape."""
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Repackage `prepare_config_and_inputs` output for the common suite."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for OpenAI-GPT models.

    Restored from a mangled revision: the bases were the undefined name
    `UpperCamelCase_`, the framework-dispatched attribute/method names
    (`all_model_classes`, `setUp`, `test_*`, `_prepare_for_class`, ...) were
    collapsed to placeholders, and several signatures repeated one parameter
    name (a SyntaxError).
    """

    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add the extra label tensors the double-heads model expects."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OpenAIGPTModelLanguageGenerationTest(unittest.TestCase):
    """Slow integration test: greedy generation from the `openai-gpt` checkpoint."""

    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        # Fixed: the mangled code passed the undefined name `a__` here.
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        # Greedy decoding (do_sample=False) so the assertion is deterministic.
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 378 | 0 |
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def lowerCAmelCase_(model_a, model_b, did_step, iteration):
    """Assert gradient (de)synchronization between two parameter-aligned models.

    Fixes the mangled signature which repeated `snake_case_` four times
    (duplicate arguments are a SyntaxError) and left the body's names unbound.

    Args:
        model_a, model_b: modules with identically ordered parameters.
        did_step: if True, gradients must match; if False, they must differ.
        iteration: loop index, used only in the assertion messages.
    """
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def lowerCAmelCase_(model, input, target, accelerator, do_backward=True):
    """Run one MSE training step, backpropagating either directly or via Accelerate.

    Fixes the mangled signature which repeated `snake_case_` five times
    (a SyntaxError) and left `model`/`input`/`target` unbound.

    Args:
        model: module to step.
        input: batch of inputs.
        target: regression targets (moved to the output's device).
        accelerator: Accelerate `Accelerator` (or any object exposing
            `gradient_accumulation_steps` / `backward`).
        do_backward: when True use `accelerator.backward`; when False scale the
            loss by the accumulation steps and call `loss.backward()` directly.
    """
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        # Manual accumulation path: emulate what `accelerator.backward` does.
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def lowerCAmelCase_(accelerator, sched=False):
    """Build a seeded regression model, its DDP copy, and a dataloader.

    Fixes the mangled signature (duplicate `snake_case_` parameters, a
    SyntaxError) and the broken `lr_lambda` whose parameter name did not match
    its body.

    Args:
        accelerator: Accelerate `Accelerator` used for device placement/prepare.
        sched: when True also build AdamW optimizers and LambdaLR schedulers.

    Returns:
        (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) when
        `sched` is truthy, else (model, ddp_model, dataloader).
    """
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def lowerCAmelCase_(accelerator):
    """Check that `no_sync` is a no-op on single-process setups.

    Restored: every call-site argument had been collapsed to the single
    parameter name, so the helpers were invoked with the accelerator repeated.
    """
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def lowerCAmelCase_(accelerator):
    """Check that `no_sync` actually defers gradient sync under DDP.

    Restored: call-site arguments had been collapsed to the single parameter
    name, making every helper call pass the accelerator repeatedly.
    """
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def lowerCAmelCase_(split_batches=False, dispatch_batches=False):
    """Check `Accelerator.accumulate` syncs gradients only on step boundaries.

    Fixes the mangled signature (duplicate `snake_case_` keyword parameters, a
    SyntaxError) and restores the collapsed call-site arguments.
    """
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def lowerCAmelCase_(split_batches=False, dispatch_batches=False):
    """Check optimizer/scheduler stepping under `Accelerator.accumulate`.

    Fixes the mangled signature (duplicate `snake_case_` keyword parameters, a
    SyntaxError) and restores the collapsed call-site arguments.
    """
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                # One scheduler step per process to mirror the DDP schedule.
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def lowerCAmelCase_():
    """Check `GradientState.active_dataloader` tracking across nested loops.

    Restored: the body referenced the undefined name `snake_case_` throughout
    (the function takes no parameters).
    """
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
        if iteration == 1:
            for batch_num, _ in enumerate(second_dataloader):
                assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                if batch_num < len(second_dataloader) - 1:
                    assert not accelerator.gradient_state.end_of_dataloader
                else:
                    assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def lowerCAmelCase_():
    """Entry point: run the sync/accumulation checks for the current setup.

    Restored: every call-site argument had been collapsed to the undefined
    name `snake_case_`. NOTE(review): the helper names below match the
    upstream script; in this mangled module they are all bound to
    `lowerCAmelCase_` — confirm against accelerate's `test_sync.py`.
    """
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def lowerCAmelCase_ ( snake_case_ ) -> None:
    """Spawn entry point (e.g. for TPU launchers); the index argument is unused."""
    # NOTE(review): `main` is not defined in this mangled module — the entry
    # point above is also bound to `lowerCAmelCase_`, which this def shadows.
    # Confirm intended names against upstream accelerate's test_sync.py.
    main()
if __name__ == "__main__":
    # NOTE(review): same issue — `main` is undefined under the mangled names.
    main()
| 717 | '''simple docstring'''
from collections import defaultdict
def lowerCAmelCase_(snake_case_: int) -> int:
    """Post-order DFS returning the size of the subtree rooted at `snake_case_`.

    Appends a node to `cuts` whenever its subtree has even size (an edge above
    it can be removed in the even-tree problem). Fixed: the original bound its
    results to throwaway names and recursed through the undefined name `dfs`.

    Relies on module-level globals `tree` (adjacency lists), `visited`, `cuts`.
    """
    ret = 1
    visited[snake_case_] = True
    for v in tree[snake_case_]:
        if v not in visited:
            ret += lowerCAmelCase_(v)
    if ret % 2 == 0:
        cuts.append(snake_case_)
    return ret
def lowerCAmelCase_ ( ) -> None:
    """Kick off the even-tree DFS from the root node (1)."""
    # NOTE(review): `dfs` is not defined in this mangled module — the DFS above
    # is also bound to `lowerCAmelCase_`, which this def shadows, so this call
    # raises NameError as written. Confirm names against the upstream script.
    dfs(1 )
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_: Dict =10, 9
SCREAMING_SNAKE_CASE_: Union[str, Any] =defaultdict(list)
SCREAMING_SNAKE_CASE_: dict[int, bool] ={}
SCREAMING_SNAKE_CASE_: list[int] =[]
SCREAMING_SNAKE_CASE_: Dict =0
SCREAMING_SNAKE_CASE_: Any =[(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
| 415 | 0 |
from __future__ import annotations
class UpperCAmelCase_:
    """A binary-tree node holding `data` and optional left/right children."""

    def __init__(self, data) -> None:
        # Fixed: the mangled original assigned to a throwaway local (and used
        # an unbound name for the argument), so `self.data` was never set.
        self.data = data
        self.left = None  # left child node, if any
        self.right = None  # right child node, if any
def _A ( SCREAMING_SNAKE_CASE__ : Node | None ): # In Order traversal of the tree
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def _A ( SCREAMING_SNAKE_CASE__ : Node | None ):
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def _A ( SCREAMING_SNAKE_CASE__ : Node ):
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def _A():  # Main function for testing.
    """Build a sample tree and exercise the helpers defined above.

    Fixed: the mangled original bound every node to a throwaway local,
    referenced the undefined class name `Node` (the node class here is
    `UpperCAmelCase_`), and passed the undefined name `_a` to the helpers.
    """
    tree = UpperCAmelCase_(1)
    tree.left = UpperCAmelCase_(2)
    tree.right = UpperCAmelCase_(3)
    tree.left.left = UpperCAmelCase_(4)
    tree.left.right = UpperCAmelCase_(5)
    tree.left.right.left = UpperCAmelCase_(6)
    tree.right.left = UpperCAmelCase_(7)
    tree.right.left.left = UpperCAmelCase_(8)
    tree.right.left.left.right = UpperCAmelCase_(9)
    # NOTE(review): the three helpers above are all bound to the mangled name
    # `_A` (each shadowing the previous), so these upstream names are
    # undefined in this module — confirm against the upstream script.
    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    _A()
| 658 |
import comet # From: unbabel-comet
import torch
import datasets
# Module logger and the docstring constants consumed by the metric class
# below. Fixed: all four bindings used the single shadowed name `lowercase`
# (with an unbound `List` annotation), leaving `_CITATION`, `_DESCRIPTION`
# and `_KWARGS_DESCRIPTION` — which the decorator and `_info` reference —
# undefined.
logger = datasets.logging.get_logger(__name__)

_CITATION = '''\
@inproceedings{rei-EtAl:2020:WMT,
 author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
 title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
 booktitle = {Proceedings of the Fifth Conference on Machine Translation},
 month = {November},
 year = {2020},
 address = {Online},
 publisher = {Association for Computational Linguistics},
 pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
 title = "{COMET}: A Neural Framework for {MT} Evaluation",
 author = "Rei, Ricardo and
 Stewart, Craig and
 Farinha, Ana C and
 Lavie, Alon",
 booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
 month = nov,
 year = "2020",
 address = "Online",
 publisher = "Association for Computational Linguistics",
 url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
 pages = "2685--2702",
}
'''

_DESCRIPTION = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''

_KWARGS_DESCRIPTION = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCAmelCase_(datasets.Metric):
    """`datasets.Metric` wrapper around Unbabel's COMET MT-evaluation models.

    Restored from a mangled revision: all three framework-dispatched methods
    shared one placeholder name (so two were shadowed), and the loaded scorer
    was bound to a local instead of `self.scorer`.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        # Download the requested checkpoint once and keep the scorer on self
        # so `_compute` can reuse it.
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        # Re-shape the column dict into one row dict per example.
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 568 | 0 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def a():
    """Parse the CLI arguments for the image-generation script.

    Fixed: the mangled version passed the undefined name `lowercase__` as
    every `type` / `default` / `required` value; the concrete values are
    restored per option.

    Returns:
        argparse.Namespace with model path, caption, image count, seed and
        CUDA device id.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-m',
        '--pretrained_model_name_or_path',
        type=str,
        default=None,
        required=True,
        help='Path to pretrained model or model identifier from huggingface.co/models.',
    )
    parser.add_argument(
        '-c',
        '--caption',
        type=str,
        default='robotic cat with wings',
        help='Text used to generate images.',
    )
    parser.add_argument(
        '-n',
        '--images_num',
        type=int,
        default=4,
        help='How much images to generate.',
    )
    parser.add_argument(
        '-s',
        '--seed',
        type=int,
        default=42,
        help='Seed for random process.',
    )
    parser.add_argument(
        '-ci',
        '--cuda_id',
        type=int,
        default=0,
        help='cuda_id.',
    )
    args = parser.parse_args()
    return args
def a(imgs, rows, cols):
    """Paste `rows * cols` equally sized PIL images into one grid image.

    Fixed: the mangled signature repeated `lowercase__` three times (duplicate
    arguments are a SyntaxError) and left `imgs` unbound in the body.

    Raises:
        ValueError: if `len(imgs) != rows * cols`.
    """
    if not len(imgs) == rows * cols:
        raise ValueError('The specified number of rows and columns are not correct.')
    w, h = imgs[0].size
    grid = Image.new('RGB', size=(cols * w, rows * h))
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs):
        # Fill left-to-right, top-to-bottom.
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def a(pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42):
    """Generate images with a Stable Diffusion pipeline and tile them in a grid.

    Fixed: the mangled signature repeated `lowercase__` (a SyntaxError) and
    the body's names were unbound.

    Returns:
        (grid, images): the tiled grid image and the list of generated images.
    """
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    # NOTE(review): `image_grid` is the tiling helper defined above, which this
    # mangled module binds as `a` — confirm the intended name upstream.
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
# NOTE(review): every module-level binding below was collapsed to the single
# name `A` (each assignment shadowing the previous), and the bodies reference
# the intended names (`args`, `tokenizer`, `text_encoder`, `vae`, `unet`,
# `pipeline`, `grid`, `images`, `dirname`), which are therefore undefined at
# runtime. `parse_args`/`generate_images` are also bound as `a` above.
# Confirm the intended bindings against the upstream diffusers example.
A = parse_args()
# Load models and create wrapper for stable diffusion
A = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
A = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
A = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
A = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
A = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Disable the safety checker (returns images unchanged, flagged as safe).
A = lambda images, clip_input: (images, False)
# Load an Intel Neural Compressor tuned UNet checkpoint when present.
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
    A = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, 'unet', unet)
else:
    A = unet.to(torch.device('cuda', args.cuda_id))
    A = pipeline.to(unet.device)
A , A = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
# Save the tiled grid plus each individual image under a caption-named folder.
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
A = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
| 46 |
import operator as op
def a(lowercase__ ):
    """Evaluate a postfix (RPN) expression given as a list of tokens,
    printing a step-by-step table of stack operations.

    Args:
        lowercase__: sequence of token strings, e.g. ``['5', '6', '9', '*', '+']``.

    Returns:
        int: value of the expression.

    BUG FIX: the original iterated an undefined ``post_fix``, pushed/joined
    the whole argument instead of the current token/stack, and read unbound
    operand names — all NameErrors introduced by identifier mangling.
    """
    stack = []
    div = lambda x , y : int(x / y )  # noqa: E731 integer division operation
    opr = {
        '^': op.pow,
        '*': op.mul,
        '/': div,
        '+': op.add,
        '-': op.sub,
    }  # operators & their respective operation
    # print table header
    print('Symbol'.center(8 ) , 'Action'.center(12 ) , 'Stack' , sep=' | ' )
    print('-' * (30 + len(lowercase__ )) )
    for x in lowercase__:
        if x.isdigit():  # if x in digit
            stack.append(x )  # append x to stack
            # output in tabular format
            print(x.rjust(8 ) , ('push(' + x + ')').ljust(12 ) , ','.join(stack ) , sep=' | ' )
        else:
            b = stack.pop()  # pop stack: second operand
            # output in tabular format
            print(''.rjust(8 ) , ('pop(' + b + ')').ljust(12 ) , ','.join(stack ) , sep=' | ' )
            a = stack.pop()  # pop stack: first operand
            # output in tabular format
            print(''.rjust(8 ) , ('pop(' + a + ')').ljust(12 ) , ','.join(stack ) , sep=' | ' )
            stack.append(
                str(opr[x](int(a ) , int(b ) ) ) )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8 ) , ('push(' + a + x + b + ')').ljust(12 ) , ','.join(stack ) , sep=' | ' , )
    return int(stack[0] )
if __name__ == "__main__":
    # BUG FIX: the original read input into `A` but evaluated `solve(Postfix)`
    # — both names undefined. Call this module's evaluator `a` on the parsed
    # tokens instead.
    A = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
    print('\n\tResult = ', a(A))
| 46 | 1 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _a ( _UpperCamelCase , unittest.TestCase ):
    """Unit tests for ``BioGptTokenizer`` built on a tiny BPE fixture.

    NOTE(review): identifiers in this file appear machine-mangled — locals
    are all bound to ``__A`` while later lines read unbound names
    (``lowerCAmelCase_``, ``tokenizer``, ``tokens`` ...), and all four test
    methods share the name ``__UpperCAmelCase`` so earlier ones are shadowed.
    Confirm against the upstream test module before relying on this class.
    """

    # Tokenizer class under test; no fast/rust variant is exercised.
    lowerCamelCase_ : Dict = BioGptTokenizer
    lowerCamelCase_ : int = False

    def __UpperCAmelCase( self ):
        # Fixture setup: write a minimal vocab/merges pair into tmpdirname.
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        __A : List[str] = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        __A : int = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
        __A : Optional[Any] = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        __A : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        __A : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" ) as fp:
            fp.write(json.dumps(lowerCAmelCase_ ) )
        with open(self.merges_file , "w" ) as fp:
            fp.write("\n".join(lowerCAmelCase_ ) )

    def __UpperCAmelCase( self , __UpperCAmelCase ):
        # Returns the (input, expected-output) text pair for common tests.
        __A : Any = "lower newer"
        __A : Optional[int] = "lower newer"
        return input_text, output_text

    def __UpperCAmelCase( self ):
        # Tokenize "lower" with the fixture BPE and verify tokens and ids.
        __A : Any = BioGptTokenizer(self.vocab_file , self.merges_file )
        __A : Tuple = "lower"
        __A : str = ["low", "er</w>"]
        __A : int = tokenizer.tokenize(lowerCAmelCase_ )
        self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
        __A : List[Any] = tokens + ["<unk>"]
        __A : Tuple = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , lowerCAmelCase_ )

    @slow
    def __UpperCAmelCase( self ):
        # Integration check against the published microsoft/biogpt checkpoint:
        # build_inputs_with_special_tokens prepends token id 2 before each sequence.
        __A : Any = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
        __A : int = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase_ )
        __A : Optional[int] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase_ )
        __A : Any = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
        __A : List[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
        self.assertTrue(encoded_sentence == [2] + text )
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 520 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_snake_case : List[Any] = logging.get_logger(__name__)
class _UpperCAmelCase ( _UpperCamelCase ):
    """Image processor implementing the resize → center-crop → rescale →
    normalize pipeline and returning a ``BatchFeature`` of ``pixel_values``.

    NOTE(review): identifiers appear machine-mangled — every method declares
    all parameters with one name (``lowerCAmelCase_``, a duplicate-argument
    SyntaxError), locals are rebound to ``__lowerCAmelCase``, and later lines
    read the original, unbound names (``size``, ``crop_size``, ...). Confirm
    against the upstream implementation before trusting details here.
    """

    # Key produced in the BatchFeature output.
    a_ = ["""pixel_values"""]

    def __init__( self : Optional[int] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_5_5 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **lowerCAmelCase_ : Any , ) -> None:
        # Record the default preprocessing configuration; ImageNet statistics
        # are the fallback normalization mean/std.
        super().__init__(**lowerCAmelCase_ )
        __lowerCAmelCase = size if size is not None else {'shortest_edge': 2_2_4}
        __lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
        __lowerCAmelCase = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
        __lowerCAmelCase = get_size_dict(lowerCAmelCase_ , param_name='crop_size' )
        __lowerCAmelCase = do_resize
        __lowerCAmelCase = size
        __lowerCAmelCase = resample
        __lowerCAmelCase = do_center_crop
        __lowerCAmelCase = crop_size
        __lowerCAmelCase = do_rescale
        __lowerCAmelCase = rescale_factor
        __lowerCAmelCase = do_normalize
        __lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        __lowerCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def lowercase ( self : Dict , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Optional[int] , ) -> np.ndarray:
        """Resize an image; a ``shortest_edge`` spec is scaled by 256/224 first."""
        __lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            __lowerCAmelCase = int((2_5_6 / 2_2_4) * size['shortest_edge'] )
            __lowerCAmelCase = get_resize_output_image_size(lowerCAmelCase_ , size=lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
            __lowerCAmelCase = {'height': output_size[0], 'width': output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
        return resize(
            lowerCAmelCase_ , size=(size_dict['height'], size_dict['width']) , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )

    def lowercase ( self : str , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : str , ) -> np.ndarray:
        """Center-crop an image to ``size['height'] x size['width']``."""
        __lowerCAmelCase = get_size_dict(lowerCAmelCase_ )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
        return center_crop(lowerCAmelCase_ , size=(size['height'], size['width']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )

    def lowercase ( self : Dict , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[int, float] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
        """Rescale pixel values by a scalar factor (typically 1/255)."""
        return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )

    def lowercase ( self : int , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[str] , ) -> np.ndarray:
        """Normalize an image with the given per-channel mean and std."""
        return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )

    def lowercase ( self : Optional[Any] , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , lowerCAmelCase_ : Optional[TensorType] = None , lowerCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase_ : str , ) -> BatchFeature:
        """Apply the configured transform chain to one or more images and
        package the result as a BatchFeature."""
        # Per-call overrides fall back to the instance defaults.
        __lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
        __lowerCAmelCase = resample if resample is not None else self.resample
        __lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
        __lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
        __lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
        __lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
        __lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
        __lowerCAmelCase = image_std if image_std is not None else self.image_std
        __lowerCAmelCase = size if size is not None else self.size
        __lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
        __lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
        __lowerCAmelCase = get_size_dict(lowerCAmelCase_ , param_name='crop_size' )
        __lowerCAmelCase = make_list_of_images(lowerCAmelCase_ )
        if not valid_images(lowerCAmelCase_ ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        __lowerCAmelCase = [to_numpy_array(lowerCAmelCase_ ) for image in images]
        if do_resize:
            __lowerCAmelCase = [self.resize(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
        if do_center_crop:
            __lowerCAmelCase = [self.center_crop(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
        if do_rescale:
            __lowerCAmelCase = [self.rescale(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
        if do_normalize:
            __lowerCAmelCase = [self.normalize(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
        __lowerCAmelCase = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
        __lowerCAmelCase = {'pixel_values': images}
        return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
| 53 | 0 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Tests for ``FlaxAutoModel``: checkpoint loading, jax.jit compatibility,
    and the error messages raised for bad identifiers/revisions/files.

    NOTE(review): identifiers appear machine-mangled — all eight test methods
    share the name ``A__`` (earlier definitions are shadowed) and loop bodies
    reference an unbound ``UpperCAmelCase`` instead of ``model_name``.
    Confirm against the upstream test module.
    """

    @slow
    def A__ (self):
        # BERT checkpoints load via AutoConfig + FlaxAutoModel.
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(UpperCAmelCase):
                __UpperCAmelCase =AutoConfig.from_pretrained(UpperCAmelCase)
                self.assertIsNotNone(UpperCAmelCase)
                self.assertIsInstance(UpperCAmelCase , UpperCAmelCase)
                __UpperCAmelCase =FlaxAutoModel.from_pretrained(UpperCAmelCase)
                self.assertIsNotNone(UpperCAmelCase)
                self.assertIsInstance(UpperCAmelCase , UpperCAmelCase)

    @slow
    def A__ (self):
        # RoBERTa checkpoints load via AutoConfig + FlaxAutoModel.
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(UpperCAmelCase):
                __UpperCAmelCase =AutoConfig.from_pretrained(UpperCAmelCase)
                self.assertIsNotNone(UpperCAmelCase)
                self.assertIsInstance(UpperCAmelCase , UpperCAmelCase)
                __UpperCAmelCase =FlaxAutoModel.from_pretrained(UpperCAmelCase)
                self.assertIsNotNone(UpperCAmelCase)
                self.assertIsInstance(UpperCAmelCase , UpperCAmelCase)

    @slow
    def A__ (self):
        # A loaded BERT model must run under jax.jit.
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            __UpperCAmelCase =AutoTokenizer.from_pretrained(UpperCAmelCase)
            __UpperCAmelCase =FlaxBertModel.from_pretrained(UpperCAmelCase)
            __UpperCAmelCase =tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**UpperCAmelCase):
                return model(**UpperCAmelCase)

            eval(**UpperCAmelCase).block_until_ready()

    @slow
    def A__ (self):
        # A loaded RoBERTa model must run under jax.jit.
        for model_name in ["roberta-base", "roberta-large"]:
            __UpperCAmelCase =AutoTokenizer.from_pretrained(UpperCAmelCase)
            __UpperCAmelCase =FlaxRobertaModel.from_pretrained(UpperCAmelCase)
            __UpperCAmelCase =tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**UpperCAmelCase):
                return model(**UpperCAmelCase)

            eval(**UpperCAmelCase).block_until_ready()

    def A__ (self):
        # Unknown repo id raises a descriptive error.
        with self.assertRaisesRegex(
            UpperCAmelCase , '''bert-base is not a local folder and is not a valid model identifier'''):
            __UpperCAmelCase =FlaxAutoModel.from_pretrained('''bert-base''')

    def A__ (self):
        # Invalid git revision raises a descriptive error.
        with self.assertRaisesRegex(
            UpperCAmelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'''):
            __UpperCAmelCase =FlaxAutoModel.from_pretrained(UpperCAmelCase , revision='''aaaaaa''')

    def A__ (self):
        # Repo without a flax checkpoint raises a missing-file error.
        with self.assertRaisesRegex(
            UpperCAmelCase , '''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''' , ):
            __UpperCAmelCase =FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''')

    def A__ (self):
        # PyTorch-only repo tells the user to pass from_pt=True.
        with self.assertRaisesRegex(UpperCAmelCase , '''Use `from_pt=True` to load this model'''):
            __UpperCAmelCase =FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''')
| 142 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
# Preprocessing applied to each RGB frame before encoding: resize to
# 256x256, convert to a tensor in [0, 1], then shift/scale to [-1, 1].
UpperCamelCase_ = transforms.Compose(
    [
        transforms.Resize((2_5_6, 2_5_6)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)
def SCREAMING_SNAKE_CASE ( snake_case__ ) -> torch.Tensor:
    """Normalize the input (a tensor, a PIL image, or a list of PIL images)
    into a single batched tensor.

    Tensors pass through unchanged (assumed already preprocessed — TODO
    confirm with callers); PIL input is converted to RGB, run through the
    module-level ``UpperCamelCase_`` transform and stacked along dim 0.

    BUG FIX: the original body referenced the undefined names ``image`` and
    ``trans`` (NameErrors) instead of the parameter and the module-level
    transform.
    """
    if isinstance(snake_case__ , torch.Tensor ):
        return snake_case__
    elif isinstance(snake_case__ , PIL.Image.Image ):
        snake_case__ = [snake_case__]
    snake_case__ = [UpperCamelCase_(img.convert('''RGB''' ) ) for img in snake_case__]
    return torch.stack(snake_case__ )
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
    """DDIM image-to-image pipeline: noise the input image up to a fraction
    ``strength`` of the schedule, then denoise it back with the unet.

    NOTE(review): identifiers appear machine-mangled — several methods declare
    multiple parameters all named ``UpperCAmelCase`` (duplicate-argument
    SyntaxErrors) and locals are rebound to ``__UpperCAmelCase`` while later
    lines read unbound names (``scheduler``, ``latents``, ``image`` ...).
    Confirm against the upstream pipeline implementation.
    """

    def __init__(self , UpperCAmelCase , UpperCAmelCase):
        """Register the unet and a scheduler coerced to DDIM."""
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        __UpperCAmelCase =DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=UpperCAmelCase , scheduler=UpperCAmelCase)

    def A__ (self , UpperCAmelCase):
        """Validate that ``strength`` lies in [0.0, 1.0]."""
        if strength < 0 or strength > 1:
            raise ValueError(f"""The value of strength should in [0.0, 1.0] but is {strength}""")

    def A__ (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase):
        """Trim the timestep schedule to the suffix selected by ``strength``."""
        __UpperCAmelCase =min(int(num_inference_steps * strength) , UpperCAmelCase)
        __UpperCAmelCase =max(num_inference_steps - init_timestep , 0)
        __UpperCAmelCase =self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def A__ (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None):
        """Turn the input image into noised latents at the start timestep."""
        if not isinstance(UpperCAmelCase , (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(UpperCAmelCase)}""")
        __UpperCAmelCase =image.to(device=UpperCAmelCase , dtype=UpperCAmelCase)
        if isinstance(UpperCAmelCase , UpperCAmelCase) and len(UpperCAmelCase) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(UpperCAmelCase)}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""")
        __UpperCAmelCase =init_latents.shape
        __UpperCAmelCase =randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase , dtype=UpperCAmelCase)
        # get latents
        print('''add noise to latents at timestep''' , UpperCAmelCase)
        __UpperCAmelCase =self.scheduler.add_noise(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase)
        __UpperCAmelCase =init_latents
        return latents

    @torch.no_grad()
    def __call__(self , UpperCAmelCase = None , UpperCAmelCase = 0.8 , UpperCAmelCase = 1 , UpperCAmelCase = None , UpperCAmelCase = 0.0 , UpperCAmelCase = 5_0 , UpperCAmelCase = None , UpperCAmelCase = "pil" , UpperCAmelCase = True , ):
        """Run the img2img sampling loop and return the decoded images."""
        self.check_inputs(UpperCAmelCase)
        # 2. Preprocess image
        __UpperCAmelCase =preprocess(UpperCAmelCase)
        # 3. set timesteps
        self.scheduler.set_timesteps(UpperCAmelCase , device=self.device)
        __UpperCAmelCase , __UpperCAmelCase =self.get_timesteps(UpperCAmelCase , UpperCAmelCase , self.device)
        __UpperCAmelCase =timesteps[:1].repeat(UpperCAmelCase)
        # 4. Prepare latent variables
        __UpperCAmelCase =self.prepare_latents(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , self.unet.dtype , self.device , UpperCAmelCase)
        __UpperCAmelCase =latents
        # 5. Denoising loop
        for t in self.progress_bar(UpperCAmelCase):
            # 1. predict noise model_output
            __UpperCAmelCase =self.unet(UpperCAmelCase , UpperCAmelCase).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            __UpperCAmelCase =self.scheduler.step(
                UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , eta=UpperCAmelCase , use_clipped_model_output=UpperCAmelCase , generator=UpperCAmelCase , ).prev_sample
        # Map latents from [-1, 1] back to [0, 1] pixel range, channels-last.
        __UpperCAmelCase =(image / 2 + 0.5).clamp(0 , 1)
        __UpperCAmelCase =image.cpu().permute(0 , 2 , 3 , 1).numpy()
        if output_type == "pil":
            __UpperCAmelCase =self.numpy_to_pil(UpperCAmelCase)
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=UpperCAmelCase)
| 142 | 1 |
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
# NOTE(review): both module globals are bound to the same mangled name ``A_``
# — the logger above is immediately overwritten by the config-URL map below;
# the original distinct names appear lost.
A_ = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}
class UpperCAmelCase ( UpperCAmelCase__ ):
    """Configuration class for the XLM-ProphetNet model.

    NOTE(review): identifiers appear machine-mangled — the three class
    attributes all share the name ``SCREAMING_SNAKE_CASE_`` (each assignment
    overwrites the last), every ``__init__`` parameter shares one name (a
    duplicate-argument SyntaxError), assignments target ``lowerCamelCase_``
    instead of ``self.<attr>``, and the setter decorator references an
    unbound ``num_hidden_layers``. Confirm against the upstream config.
    """

    SCREAMING_SNAKE_CASE_ = 'xlm-prophetnet'
    SCREAMING_SNAKE_CASE_ = ['past_key_values']
    SCREAMING_SNAKE_CASE_ = {
        'num_attention_heads': 'num_encoder_attention_heads',
    }

    def __init__( self , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = "gelu" , SCREAMING_SNAKE_CASE_ = 30522 , SCREAMING_SNAKE_CASE_ = 1024 , SCREAMING_SNAKE_CASE_ = 4096 , SCREAMING_SNAKE_CASE_ = 12 , SCREAMING_SNAKE_CASE_ = 16 , SCREAMING_SNAKE_CASE_ = 4096 , SCREAMING_SNAKE_CASE_ = 12 , SCREAMING_SNAKE_CASE_ = 16 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 0.02 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = 128 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 2 , **SCREAMING_SNAKE_CASE_ , ) -> Tuple:
        """Record encoder/decoder sizes, ProphetNet n-gram settings and dropouts."""
        lowerCamelCase_ = vocab_size
        lowerCamelCase_ = hidden_size
        lowerCamelCase_ = encoder_ffn_dim
        lowerCamelCase_ = num_encoder_layers
        lowerCamelCase_ = num_encoder_attention_heads
        lowerCamelCase_ = decoder_ffn_dim
        lowerCamelCase_ = num_decoder_layers
        lowerCamelCase_ = num_decoder_attention_heads
        lowerCamelCase_ = max_position_embeddings
        lowerCamelCase_ = init_std  # Normal(0, this parameter)
        lowerCamelCase_ = activation_function
        # parameters for xlmprophetnet
        lowerCamelCase_ = ngram
        lowerCamelCase_ = num_buckets
        lowerCamelCase_ = relative_max_distance
        lowerCamelCase_ = disable_ngram_loss
        lowerCamelCase_ = eps
        # 3 Types of Dropout
        lowerCamelCase_ = attention_dropout
        lowerCamelCase_ = activation_dropout
        lowerCamelCase_ = dropout
        lowerCamelCase_ = use_cache
        super().__init__(
            pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , add_cross_attention=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )

    @property
    def UpperCamelCase( self ) -> int:
        """Total transformer depth: encoder layers plus decoder layers."""
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
        """Reject direct assignment; encoder/decoder depths are set separately."""
        raise NotImplementedError(
            'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
            ' `num_decoder_layers`.' )
| 42 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class UpperCAmelCase :
    """Helper that builds tiny ESM configs/inputs and checks model outputs.

    NOTE(review): identifiers appear machine-mangled — ``__init__`` declares
    many parameters all named ``SCREAMING_SNAKE_CASE_`` (a duplicate-argument
    SyntaxError), all helper methods share the name ``UpperCamelCase`` (earlier
    definitions are shadowed), and locals bind to ``lowerCamelCase_`` while
    later lines read the original, unbound names. Confirm against the
    upstream EsmModelTester.
    """

    def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=33 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ) -> int:
        """Store the hyper-parameters of the tiny test model."""
        lowerCamelCase_ = parent
        lowerCamelCase_ = batch_size
        lowerCamelCase_ = seq_length
        lowerCamelCase_ = is_training
        lowerCamelCase_ = use_input_mask
        lowerCamelCase_ = use_token_type_ids
        lowerCamelCase_ = use_labels
        lowerCamelCase_ = vocab_size
        lowerCamelCase_ = hidden_size
        lowerCamelCase_ = num_hidden_layers
        lowerCamelCase_ = num_attention_heads
        lowerCamelCase_ = intermediate_size
        lowerCamelCase_ = hidden_act
        lowerCamelCase_ = hidden_dropout_prob
        lowerCamelCase_ = attention_probs_dropout_prob
        lowerCamelCase_ = max_position_embeddings
        lowerCamelCase_ = type_vocab_size
        lowerCamelCase_ = type_sequence_label_size
        lowerCamelCase_ = initializer_range
        lowerCamelCase_ = num_labels
        lowerCamelCase_ = num_choices
        lowerCamelCase_ = scope

    def UpperCamelCase( self ) -> str:
        """Build random input ids, masks and labels plus a config."""
        lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase_ = None
        if self.use_input_mask:
            lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase_ = None
        lowerCamelCase_ = None
        lowerCamelCase_ = None
        if self.use_labels:
            lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
        lowerCamelCase_ = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def UpperCamelCase( self ) -> List[str]:
        """Return a small EsmConfig matching the stored hyper-parameters."""
        return EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )

    def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
        """Run EsmModel in eval mode and check last_hidden_state/pooler shapes."""
        lowerCamelCase_ = EsmModel(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
        """Run EsmForMaskedLM and check the logits shape."""
        lowerCamelCase_ = EsmForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
        """Run EsmForTokenClassification and check the logits shape."""
        lowerCamelCase_ = self.num_labels
        lowerCamelCase_ = EsmForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def UpperCamelCase( self ) -> Any:
        """Split prepared config/inputs into (config, inputs_dict) for common tests."""
        lowerCamelCase_ = self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase_
            ) ,(
                lowerCamelCase_
            ) ,(
                lowerCamelCase_
            ) ,(
                lowerCamelCase_
            ) ,(
                lowerCamelCase_
            ) ,(
                lowerCamelCase_
            ) ,
        ) = config_and_inputs
        lowerCamelCase_ = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
    """Common + ESM-specific model tests (shapes, position ids, skips).

    NOTE(review): identifiers appear machine-mangled — the five class
    attributes all share the name ``SCREAMING_SNAKE_CASE_`` (each overwrites
    the previous), every test method is named ``UpperCamelCase`` (earlier
    definitions are shadowed), and ``EsmModelTester`` is referenced but never
    defined in this file. Confirm against the upstream test module.
    """

    SCREAMING_SNAKE_CASE_ = False
    SCREAMING_SNAKE_CASE_ = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    SCREAMING_SNAKE_CASE_ = ()
    SCREAMING_SNAKE_CASE_ = (
        {
            'feature-extraction': EsmModel,
            'fill-mask': EsmForMaskedLM,
            'text-classification': EsmForSequenceClassification,
            'token-classification': EsmForTokenClassification,
            'zero-shot': EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE_ = True

    def UpperCamelCase( self ) -> List[Any]:
        # Test fixtures: model tester + config tester.
        lowerCamelCase_ = EsmModelTester(self )
        lowerCamelCase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 )

    def UpperCamelCase( self ) -> List[Any]:
        # Exercise the shared config serialization/round-trip tests.
        self.config_tester.run_common_tests()

    def UpperCamelCase( self ) -> Any:
        # Base-model forward-pass shape check.
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )

    def UpperCamelCase( self ) -> str:
        # Same check across every position-embedding flavor.
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            lowerCamelCase_ = type
            self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )

    def UpperCamelCase( self ) -> Dict:
        # Masked-LM head shape check.
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ )

    def UpperCamelCase( self ) -> int:
        # Token-classification head shape check.
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ )

    @slow
    def UpperCamelCase( self ) -> Any:
        # Smoke test: the first published checkpoint loads.
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase_ = EsmModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
            self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )

    def UpperCamelCase( self ) -> Any:
        # create_position_ids_from_input_ids offsets by padding_idx + 1 and
        # maps padding tokens to padding_idx itself.
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()[0]
        lowerCamelCase_ = EsmEmbeddings(config=SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
        lowerCamelCase_ = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ] )
        lowerCamelCase_ = create_position_ids_from_input_ids(SCREAMING_SNAKE_CASE_ , model.padding_idx )
        self.assertEqual(position_ids.shape , expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )

    def UpperCamelCase( self ) -> List[Any]:
        # Same offset behavior when positions come from input embeddings.
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()[0]
        lowerCamelCase_ = EsmEmbeddings(config=SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = torch.empty(2 , 4 , 30 )
        lowerCamelCase_ = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        lowerCamelCase_ = torch.as_tensor([expected_single_positions, expected_single_positions] )
        lowerCamelCase_ = embeddings.create_position_ids_from_inputs_embeds(SCREAMING_SNAKE_CASE_ )
        self.assertEqual(position_ids.shape , expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )

    @unittest.skip('Esm does not support embedding resizing' )
    def UpperCamelCase( self ) -> Dict:
        """Skipped: embedding resizing is unsupported by ESM."""
        pass

    @unittest.skip('Esm does not support embedding resizing' )
    def UpperCamelCase( self ) -> Any:
        """Skipped: embedding resizing is unsupported by ESM."""
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def UpperCamelCase( self ) -> List[Any]:
        """Skipped pending a smaller common-test model."""
        pass
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ ):
    """Slow integration tests against the public facebook/esm2_t6_8M_UR50D
    checkpoint, comparing output slices to recorded reference values.

    NOTE(review): names appear machine-mangled — both tests share the method
    name ``UpperCamelCase`` (the first is shadowed) and locals bind to
    ``lowerCamelCase_`` while assertions read unbound names.
    """

    @slow
    def UpperCamelCase( self ) -> Any:
        # Masked-LM head: logits shape (1, 6, 33) and a fixed 3x3 value slice.
        with torch.no_grad():
            lowerCamelCase_ = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
            model.eval()
            lowerCamelCase_ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
            lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )[0]
            lowerCamelCase_ = 33
            lowerCamelCase_ = torch.Size((1, 6, vocab_size) )
            self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
            lowerCamelCase_ = torch.tensor(
                [[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )

    @slow
    def UpperCamelCase( self ) -> Tuple:
        # Base model: compare a 3x3 slice of last_hidden_state values.
        with torch.no_grad():
            lowerCamelCase_ = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
            model.eval()
            lowerCamelCase_ = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
            lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )[0]
            # compare the actual values for a slice.
            lowerCamelCase_ = torch.tensor(
                [[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
| 42 | 1 |
def snake_case__ ( grid ):
    """Return the minimum right/down path sum from the top-left to the
    bottom-right cell of *grid*.

    The grid is updated in place: each cell ends up holding the cheapest cost
    of reaching it.

    Raises:
        TypeError: if *grid* is empty or its first row is empty.
    """
    if not grid or not grid[0]:
        raise TypeError("""The grid does not contain the appropriate information""" )

    # The first row is only reachable from the left: prefix-sum it.
    for cell_n in range(1 , len(grid[0] ) ):
        grid[0][cell_n] += grid[0][cell_n - 1]

    row_above = grid[0]
    for row_n in range(1 , len(grid ) ):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row , row_above )
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row ( current_row , row_above ):
    """Fold *row_above* into *current_row*: each cell adds the cheaper of the
    cell above or the (already folded) cell to its left."""
    current_row[0] += row_above[0]
    for cell_n in range(1 , len(current_row ) ):
        current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )

    return current_row
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# NOTE(review): the original bound every constant to the single name ``a__``,
# while the tokenizer class below reads ``logger``, ``VOCAB_FILES_NAMES``,
# ``PRETRAINED_VOCAB_FILES_MAP`` and ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES``;
# the referenced names are restored.
logger = logging.get_logger(__name__)

# File name the SentencePiece model is saved under.
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}

# Shortcut model name -> hosted SentencePiece model URL.
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''bert_for_seq_generation''': (
            '''https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'''
        ),
    }
}

# Maximum input length supported by each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''bert_for_seq_generation''': 512}
class __magic_name__( PreTrainedTokenizer ):
    """SentencePiece tokenizer for the ``google/bert_for_seq_generation`` checkpoint.

    NOTE(review): the original subclassed an undefined name, declared every
    attribute/method under a single colliding identifier and repeated parameter
    names (a SyntaxError). The canonical ``PreTrainedTokenizer`` hook names are
    restored so the base class can resolve them.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["""input_ids""", """attention_mask"""]

    def __init__(
        self ,
        vocab_file ,
        bos_token="<s>" ,
        eos_token="</s>" ,
        unk_token="<unk>" ,
        pad_token="<pad>" ,
        sep_token="<::::>" ,
        sp_model_kwargs: Optional[Dict[str, Any]] = None ,
        **kwargs ,
    ):
        """Load the SentencePiece model at *vocab_file* and register the special tokens."""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token ,
            eos_token=eos_token ,
            unk_token=unk_token ,
            pad_token=pad_token ,
            sep_token=sep_token ,
            sp_model_kwargs=self.sp_model_kwargs ,
            **kwargs ,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )

    @property
    def vocab_size( self ):
        """Size of the underlying SentencePiece vocabulary."""
        return self.sp_model.get_piece_size()

    def get_vocab( self ):
        """Return the token -> id mapping, including tokens added after loading."""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        # The C++ SentencePiece processor cannot be pickled; drop it here and
        # reload it from ``vocab_file`` in ``__setstate__``.
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state

    def __setstate__( self , d ):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def _tokenize( self , text: str ):
        """Tokenize *text* into SentencePiece sub-word strings."""
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id( self , token ):
        """Map a sub-word string to its vocabulary id."""
        return self.sp_model.piece_to_id(token )

    def _convert_id_to_token( self , index ):
        """Map a vocabulary id back to its sub-word string."""
        token = self.sp_model.IdToPiece(index )
        return token

    def convert_tokens_to_string( self , tokens ):
        """Detokenize, decoding runs of ordinary pieces while splicing special tokens through verbatim."""
        current_sub_tokens = []
        out_string = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()

    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        """Copy (or serialize) the SentencePiece model into *save_directory* and return the written path."""
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            # Fallback: the loaded model no longer exists on disk, so serialize it.
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )

        return (out_vocab_file,)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
# NOTE(review): the original bound three unrelated values to one throwaway name
# and then referenced the never-defined ``_import_structure``; the standard
# lazy-module wiring is restored.
_import_structure = {
    '''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
    '''processing_trocr''': ['''TrOCRProcessor'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Model classes are only importable when torch is present.
    _import_structure['''modeling_trocr'''] = [
        '''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TrOCRForCausalLM''',
        '''TrOCRPreTrainedModel''',
    ]


if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class snake_case_ (nn.Module ):
    r"""Cross-attention down block: ``num_layers`` x (ResNet -> Transformer) plus an optional downsampler.

    NOTE(review): the original declared every field under one colliding name and
    never assigned the built submodules to ``self``; field and attribute names
    are restored from the references in ``__call__``.
    """

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32  # NOTE(review): original ``jnp.floataa`` is not a jax dtype

    def setup( self ):
        # Flax invokes ``setup`` to build submodules; the original method name was never called.
        resnets = []
        attentions = []

        for i in range(self.num_layers ):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlockaD(
                in_channels=in_channels ,
                out_channels=self.out_channels ,
                dropout_prob=self.dropout ,
                dtype=self.dtype ,
            )
            resnets.append(res_block )

            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels ,
                n_heads=self.num_attention_heads ,
                d_head=self.out_channels // self.num_attention_heads ,
                depth=1 ,
                use_linear_projection=self.use_linear_projection ,
                only_cross_attention=self.only_cross_attention ,
                use_memory_efficient_attention=self.use_memory_efficient_attention ,
                dtype=self.dtype ,
            )
            attentions.append(attn_block )

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )

    def __call__( self , hidden_states , temb , encoder_hidden_states , deterministic=True ):
        output_states = ()

        for resnet, attn in zip(self.resnets , self.attentions ):
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states )
            output_states += (hidden_states,)

        return hidden_states, output_states
class snake_case_ (nn.Module ):
    r"""Plain down block: ``num_layers`` ResNet blocks plus an optional downsampler.

    NOTE(review): field names and ``self`` assignments restored from the
    references in ``__call__`` (the original never bound the submodules).
    """

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32  # NOTE(review): original ``jnp.floataa`` is not a jax dtype

    def setup( self ):
        resnets = []

        for i in range(self.num_layers ):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlockaD(
                in_channels=in_channels ,
                out_channels=self.out_channels ,
                dropout_prob=self.dropout ,
                dtype=self.dtype ,
            )
            resnets.append(res_block )

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )

    def __call__( self , hidden_states , temb , deterministic=True ):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states )
            output_states += (hidden_states,)

        return hidden_states, output_states
class snake_case_ (nn.Module ):
    r"""Cross-attention up block: consumes skip connections, runs ResNet -> Transformer per layer, optional upsampler.

    NOTE(review): field names and ``self`` assignments restored from the
    references in ``__call__`` (the original never bound the submodules and
    repeated parameter names, a SyntaxError).
    """

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32  # NOTE(review): original ``jnp.floataa`` is not a jax dtype

    def setup( self ):
        resnets = []
        attentions = []

        for i in range(self.num_layers ):
            # The last layer takes the block-input channels as its skip width.
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels ,
                out_channels=self.out_channels ,
                dropout_prob=self.dropout ,
                dtype=self.dtype ,
            )
            resnets.append(res_block )

            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels ,
                n_heads=self.num_attention_heads ,
                d_head=self.out_channels // self.num_attention_heads ,
                depth=1 ,
                use_linear_projection=self.use_linear_projection ,
                only_cross_attention=self.only_cross_attention ,
                use_memory_efficient_attention=self.use_memory_efficient_attention ,
                dtype=self.dtype ,
            )
            attentions.append(attn_block )

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )

    def __call__( self , hidden_states , res_hidden_states_tuple , temb , encoder_hidden_states , deterministic=True ):
        for resnet, attn in zip(self.resnets , self.attentions ):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )

            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )

        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states )

        return hidden_states
class snake_case_ (nn.Module ):
    r"""Plain up block: consumes skip connections through ResNet blocks, optional upsampler.

    NOTE(review): field names and ``self`` assignments restored from the
    references in ``__call__``.
    """

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32  # NOTE(review): original ``jnp.floataa`` is not a jax dtype

    def setup( self ):
        resnets = []

        for i in range(self.num_layers ):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels ,
                out_channels=self.out_channels ,
                dropout_prob=self.dropout ,
                dtype=self.dtype ,
            )
            resnets.append(res_block )

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )

    def __call__( self , hidden_states , res_hidden_states_tuple , temb , deterministic=True ):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )

            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )

        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states )

        return hidden_states
class snake_case_ (nn.Module ):
    r"""Mid block: ResNet, then ``num_layers`` x (Transformer -> ResNet); there is always at least one ResNet.

    NOTE(review): field names and ``self`` assignments restored from the
    references in ``__call__``.
    """

    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32  # NOTE(review): original ``jnp.floataa`` is not a jax dtype

    def setup( self ):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlockaD(
                in_channels=self.in_channels ,
                out_channels=self.in_channels ,
                dropout_prob=self.dropout ,
                dtype=self.dtype ,
            )
        ]

        attentions = []

        for _ in range(self.num_layers ):
            attn_block = FlaxTransformeraDModel(
                in_channels=self.in_channels ,
                n_heads=self.num_attention_heads ,
                d_head=self.in_channels // self.num_attention_heads ,
                depth=1 ,
                use_linear_projection=self.use_linear_projection ,
                use_memory_efficient_attention=self.use_memory_efficient_attention ,
                dtype=self.dtype ,
            )
            attentions.append(attn_block )

            res_block = FlaxResnetBlockaD(
                in_channels=self.in_channels ,
                out_channels=self.in_channels ,
                dropout_prob=self.dropout ,
                dtype=self.dtype ,
            )
            resnets.append(res_block )

        self.resnets = resnets
        self.attentions = attentions

    def __call__( self , hidden_states , temb , encoder_hidden_states , deterministic=True ):
        hidden_states = self.resnets[0](hidden_states , temb )
        for attn, resnet in zip(self.attentions , self.resnets[1:] ):
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )

        return hidden_states
| 335 | 0 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class lowercase__ :
    """Builds a tiny MBart config and input batch for the TF model tests.

    NOTE(review): the original ``__init__`` repeated one parameter name sixteen
    times (a SyntaxError) and the class attributes/methods collided on single
    names; names are restored from the ``self.*`` reads and the call sites in
    the test class below.
    """

    config_cls = MBartConfig
    config_updates = {}
    hidden_act = 'gelu'

    def __init__(
        self ,
        parent ,
        batch_size=1_3 ,
        seq_length=7 ,
        is_training=True ,
        use_labels=False ,
        vocab_size=9_9 ,
        hidden_size=3_2 ,
        num_hidden_layers=2 ,
        num_attention_heads=4 ,
        intermediate_size=3_7 ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        max_position_embeddings=2_0 ,
        eos_token_id=2 ,
        pad_token_id=1 ,
        bos_token_id=0 ,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common( self ):
        """Return a small (config, inputs_dict) pair shared by the common tests."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size ,
            d_model=self.hidden_size ,
            encoder_layers=self.num_hidden_layers ,
            decoder_layers=self.num_hidden_layers ,
            encoder_attention_heads=self.num_attention_heads ,
            decoder_attention_heads=self.num_attention_heads ,
            encoder_ffn_dim=self.intermediate_size ,
            decoder_ffn_dim=self.intermediate_size ,
            dropout=self.hidden_dropout_prob ,
            attention_dropout=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            eos_token_ids=[2] ,
            bos_token_id=self.bos_token_id ,
            pad_token_id=self.pad_token_id ,
            decoder_start_token_id=self.pad_token_id ,
            **self.config_updates ,
        )
        inputs_dict = prepare_mbart_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict

    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        """Smoke-check the decoder's cached (past-key-values) forward pass on one example."""
        model = TFMBartModel(config=config ).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )

        outputs, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config ,
    input_ids ,
    decoder_input_ids ,
    attention_mask=None ,
    decoder_attention_mask=None ,
    head_mask=None ,
    decoder_head_mask=None ,
    cross_attn_head_mask=None ,
):
    """Fill in default attention and head masks for an MBart forward pass.

    NOTE(review): the original signature repeated one parameter name eight
    times (a SyntaxError) while the body already referenced the intended names;
    the function is also renamed to match its call site in the model tester.
    """
    if attention_mask is None:
        # Attend to every non-padding token.
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )  # NOTE(review): original ``tf.inta`` is not a TF dtype
    if decoder_attention_mask is None:
        # Always attend to the first decoder token, then mask padding.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] ,
            axis=-1 ,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class lowercase__ (TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common-suite tests for the TF MBart models.

    NOTE(review): the original base list repeated the class's own (undefined)
    name; the imported mixins that read the attributes below are restored, and
    methods are renamed so ``unittest``/the mixins actually invoke them.
    ``TFMBartModelTester`` is referenced as in the original; the tester class
    above is bound to a different name — verify against upstream.
    """

    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': TFMBartForConditionalGeneration,
            'feature-extraction': TFMBartModel,
            'summarization': TFMBartForConditionalGeneration,
            'text2text-generation': TFMBartForConditionalGeneration,
            'translation': TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp( self ):
        self.model_tester = TFMBartModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MBartConfig )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_sentencepiece
@require_tokenizers
@require_tf
class lowercase__ (unittest.TestCase ):
    """Slow integration test: en->ro translation with facebook/mbart-large-en-ro.

    NOTE(review): the original defined five methods under one colliding name
    while the bodies call ``self.tokenizer`` / ``self.model`` /
    ``self.translate_src_text`` / ``self._assert_generated_batch_equal_expected``;
    those names (and the ``src_text``/``expected_text``/``model_name`` attributes
    they read) are restored.
    """

    src_text = [
        ' UN Chief Says There Is No Military Solution in Syria',
    ]
    expected_text = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
    ]
    model_name = 'facebook/mbart-large-en-ro'

    @cached_property
    def tokenizer( self ):
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def model( self ):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    def _assert_generated_batch_equal_expected( self , **tokenizer_kwargs ):
        generated_words = self.translate_src_text(**tokenizer_kwargs )
        self.assertListEqual(self.expected_text , generated_words )

    def translate_src_text( self , **tokenizer_kwargs ):
        """Tokenize ``src_text``, beam-search generate, and decode the batch."""
        model_inputs = self.tokenizer(self.src_text , **tokenizer_kwargs , return_tensors="""tf""" )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
        generated_words = self.tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
        return generated_words

    @slow
    def test_batch_generation_en_ro( self ):
        # ``test_`` prefix restored so unittest actually discovers this case.
        self._assert_generated_batch_equal_expected()
| 705 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key( name ):
    """Translate one GroupViT checkpoint key into the HF GroupViT naming scheme.

    NOTE(review): the body already referenced ``name`` while the parameter was
    a mangled placeholder (NameError); the function is also renamed to match
    its call site in the state-dict conversion.
    """
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" )
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" )
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" )
    if "img_encoder.layers" in name:
        name = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" )
    if "blocks" in name and "res" not in name:
        name = name.replace("""blocks""" , """layers""" )
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("""attn""" , """self_attn""" )
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("""proj""" , """out_proj""" )
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """layer_norm1""" )
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("""norm2""" , """layer_norm2""" )
    if "img_encoder.norm" in name:
        name = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" )
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" )
    if "text_encoder.positional_embedding" in name:
        name = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" )
    if "ln_1" in name:
        name = name.replace("""ln_1""" , """layer_norm1""" )
    if "ln_2" in name:
        name = name.replace("""ln_2""" , """layer_norm2""" )
    if "c_fc" in name:
        name = name.replace("""c_fc""" , """fc1""" )
    if "c_proj" in name:
        name = name.replace("""c_proj""" , """fc2""" )
    if "text_encoder" in name:
        name = name.replace("""text_encoder""" , """text_model""" )
    if "ln_final" in name:
        name = name.replace("""ln_final""" , """final_layer_norm""" )
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("""img_projector.linear_hidden.""" , """visual_projection.0.""" )
    if "img_projector.linear_out." in name:
        name = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" )
    if "text_projector.linear_hidden" in name:
        name = name.replace("""text_projector.linear_hidden""" , """text_projection.0""" )
    if "text_projector.linear_out" in name:
        name = name.replace("""text_projector.linear_out""" , """text_projection.3""" )

    return name
def convert_state_dict( orig_state_dict , config ):
    """Rename all keys of *orig_state_dict* and split fused qkv / in_proj tensors
    into separate q/k/v projections.

    NOTE(review): the original signature duplicated one parameter name (a
    SyntaxError) and the split tensors were assigned to throwaway locals; the
    dict targets are reconstructed from the naming scheme used by
    ``rename_key`` — verify against the upstream conversion script.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(""".""" )
            stage_num, layer_num = int(key_split[2] ), int(key_split[4] )
            dim = config.vision_config.hidden_size
            prefix = f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn'
            if "weight" in key:
                orig_state_dict[f'{prefix}.q_proj.weight'] = val[:dim, :]
                orig_state_dict[f'{prefix}.k_proj.weight'] = val[dim : dim * 2, :]
                orig_state_dict[f'{prefix}.v_proj.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'{prefix}.q_proj.bias'] = val[:dim]
                orig_state_dict[f'{prefix}.k_proj.bias'] = val[dim : dim * 2]
                orig_state_dict[f'{prefix}.v_proj.bias'] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(""".""" )
            layer_num = int(key_split[3] )
            dim = config.text_config.hidden_size
            prefix = f'text_model.encoder.layers.{layer_num}.self_attn'
            if "weight" in key:
                orig_state_dict[f'{prefix}.q_proj.weight'] = val[:dim, :]
                orig_state_dict[f'{prefix}.k_proj.weight'] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'{prefix}.v_proj.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'{prefix}.q_proj.bias'] = val[:dim]
                orig_state_dict[f'{prefix}.k_proj.bias'] = val[dim : dim * 2]
                orig_state_dict[f'{prefix}.v_proj.bias'] = val[-dim:]
        else:
            new_name = rename_key(key )
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    """Download the standard COCO cats image used to sanity-check the converted model.

    NOTE(review): renamed from an obfuscated identifier to match its call site
    in the checkpoint conversion; the URL/stream arguments were never bound in
    the original.
    """
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_groupvit_checkpoint( checkpoint_path , pytorch_dump_folder_path , model_name="groupvit-gcc-yfcc" , push_to_hub=False ):
    """Convert a GroupViT checkpoint into HF format, verify a logit slice, and save (optionally push).

    NOTE(review): the original signature repeated one parameter name four
    times (a SyntaxError); parameters are restored from the argparse call site
    and the function renamed to match it.
    """
    config = GroupViTConfig()
    model = GroupViTModel(config ).eval()

    state_dict = torch.load(checkpoint_path , map_location="""cpu""" )["""model"""]
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys ) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
    image = prepare_img()
    inputs = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=image , padding=True , return_tensors="""pt""" )

    with torch.no_grad():
        outputs = model(**inputs )

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]] )
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]] )
    else:
        raise ValueError(f'Model name {model_name} not supported.' )
    assert torch.allclose(outputs.logits_per_image , expected_logits , atol=1e-3 )

    processor.save_pretrained(pytorch_dump_folder_path )
    model.save_pretrained(pytorch_dump_folder_path )
    print("""Successfully saved processor and model to""" , pytorch_dump_folder_path )

    if push_to_hub:
        print("""Pushing to the hub...""" )
        processor.push_to_hub(model_name , organization="""nielsr""" )
        model.push_to_hub(model_name , organization="""nielsr""" )
if __name__ == "__main__":
    # NOTE(review): the original bound the parser/args to mangled names but
    # referenced ``parser``/``args`` below (NameErrors); the bindings are restored.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
    )
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
    parser.add_argument(
        '--model_name',
        default='groupvit-gccy-fcc',
        type=str,
        help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
    )
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
        help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
    )
    args = parser.parse_args()

    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
from typing import Any
class a :
def __init__( self : int , snake_case__ : Any ):
"""simple docstring"""
__lowerCAmelCase = data
__lowerCAmelCase = None
class a :
def __init__( self : Tuple ):
"""simple docstring"""
__lowerCAmelCase = None
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__lowerCAmelCase = self.head
while temp is not None:
print(temp.data , end=" " )
__lowerCAmelCase = temp.next
print()
def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : Any ):
"""simple docstring"""
__lowerCAmelCase = Node(snake_case__ )
__lowerCAmelCase = self.head
__lowerCAmelCase = new_node
def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
if node_data_a == node_data_a:
return
else:
__lowerCAmelCase = self.head
while node_a is not None and node_a.data != node_data_a:
__lowerCAmelCase = node_a.next
__lowerCAmelCase = self.head
while node_a is not None and node_a.data != node_data_a:
__lowerCAmelCase = node_a.next
if node_a is None or node_a is None:
return
__lowerCAmelCase , __lowerCAmelCase = node_a.data, node_a.data
if __name__ == "__main__":
    # Demo: build the list 1..5, swap the values 1 and 4, print before/after.
    # NOTE(review): the original bound the list to a mangled name but used ``ll`` below.
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
| 611 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# NOTE(review): the original bound every constant to one name while the
# tokenizer class below reads ``logger``, ``VOCAB_FILES_NAMES``,
# ``PRETRAINED_VOCAB_FILES_MAP``, ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES``
# and ``CONTROL_CODES``; the referenced names are restored.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 2_5_6,
}

# Control-code prompt tokens recognised by the CTRL model (name -> vocab id).
CONTROL_CODES = {
    "Pregnancy": 1_6_8_6_2_9,
    "Christianity": 7_6_7_5,
    "Explain": 1_0_6_4_2_3,
    "Fitness": 6_3_4_4_0,
    "Saving": 6_3_1_6_3,
    "Ask": 2_7_1_7_1,
    "Ass": 9_5_9_8_5,
    "Joke": 1_6_3_5_0_9,
    "Questions": 4_5_6_2_2,
    "Thoughts": 4_9_6_0_5,
    "Retail": 5_2_3_4_2,
    "Feminism": 1_6_4_3_3_8,
    "Writing": 1_1_9_9_2,
    "Atheism": 1_9_2_2_6_3,
    "Netflix": 4_8_6_1_6,
    "Computing": 3_9_6_3_9,
    "Opinion": 4_3_2_1_3,
    "Alone": 4_4_9_6_7,
    "Funny": 5_8_9_1_7,
    "Gaming": 4_0_3_5_8,
    "Human": 4_0_8_8,
    "India": 1_3_3_1,
    "Joker": 7_7_1_3_8,
    "Diet": 3_6_2_0_6,
    "Legal": 1_1_8_5_9,
    "Norman": 4_9_3_9,
    "Tip": 7_2_6_8_9,
    "Weight": 5_2_3_4_3,
    "Movies": 4_6_2_7_3,
    "Running": 2_3_4_2_5,
    "Science": 2_0_9_0,
    "Horror": 3_7_7_9_3,
    "Confession": 6_0_5_7_2,
    "Finance": 1_2_2_5_0,
    "Politics": 1_6_3_6_0,
    "Scary": 1_9_1_9_8_5,
    "Support": 1_2_6_5_4,
    "Technologies": 3_2_5_1_6,
    "Teenage": 6_6_1_6_0,
    "Event": 3_2_7_6_9,
    "Learned": 6_7_4_6_0,
    "Notion": 1_8_2_7_7_0,
    "Wikipedia": 3_7_5_8_3,
    "Books": 6_6_6_5,
    "Extract": 7_6_0_5_0,
    "Confessions": 1_0_2_7_0_1,
    "Conspiracy": 7_5_9_3_2,
    "Links": 6_3_6_7_4,
    "Narcissus": 1_5_0_4_2_5,
    "Relationship": 5_4_7_6_6,
    "Relationships": 1_3_4_7_9_6,
    "Reviews": 4_1_6_7_1,
    "News": 4_2_5_6,
    "Translation": 2_6_8_2_0,
    "multilingual": 1_2_8_4_0_6,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word`.

    `word` is a sequence of symbols (strings of variable length); the result is
    a set of (prev_symbol, next_symbol) tuples, empty for a single-symbol word.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class CTRLTokenizer(PreTrainedTokenizer):
    """
    CTRL tokenizer: word-level BPE with "@@ " continuation markers.

    Args:
        vocab_file: Path to the JSON vocabulary (token -> id).
        merges_file: Path to the BPE merges file (first line is a version header).
        unk_token: Token substituted for out-of-vocabulary symbols.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # Skip the "#version" header and the trailing empty line.
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        """Size of the base vocabulary (excluding added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full vocabulary (base plus added tokens) as a dict."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to one whitespace-delimited token (memoized)."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked bigram first; unknown pairs rank as +inf.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]  # drop the trailing "</w>" marker
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split `text` on whitespace and BPE-encode each token."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id, falling back to the unk token's id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an id back to a token (str), falling back to the unk token."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Join tokens into text, removing the "@@ " continuation markers."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into `save_directory`; return their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
| 611 | 1 |
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    """Checks that `get_activation` returns the expected modules with sane outputs."""

    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        # SiLU saturates to 0 for very negative inputs and is ~identity for large ones.
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
| 589 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """Feature for translations with a fixed set of languages per example.

    Attributes:
        languages: Language codes present in every example.
        id: Optional feature id.
    """

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self) -> "pa.StructType":
        # One string field per language, in sorted order for a stable schema.
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten this nested feature into one string Value per language."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    """Feature for translations with a variable set of languages per example.

    Attributes:
        languages: Optional closed set of allowed language codes (deduplicated
            and sorted in `__post_init__`); `None` means any language is valid.
        num_languages: Derived count of allowed languages (set automatically).
        id: Optional feature id.
    """

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self) -> "pa.StructType":
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        """Validate languages and flatten `translation_dict` into parallel tuples.

        A language may map to a single string or to a list of alternative
        translations; the output pairs are sorted by language code.
        """
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten into parallel sequences of language codes and translations."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
| 589 | 1 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL for pretrained EfficientNet models.
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class EfficientNetConfig(PretrainedConfig):
    r"""
    Configuration class for an EfficientNet model. The defaults correspond to
    the EfficientNet-B7 architecture (google/efficientnet-b7).
    """

    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        # NOTE: list defaults are kept for checkpoint compatibility and treated as read-only.
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2_560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        # Each block repeat expands into 4 hidden layers.
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    """ONNX export configuration for EfficientNet."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single image input with dynamic axes for batch and spatial dims.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating the ONNX export against PyTorch.
        return 1e-5
| 684 |
def is_balanced(s: str) -> bool:
    """Return True if every bracket in `s` opens and closes in matching order.

    Non-bracket characters are ignored; an empty string is balanced.
    """
    stack = []
    open_brackets = {"(", "[", "{"}
    closed_brackets = {")", "]", "}"}
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for char in s:
        if char in open_brackets:
            stack.append(char)
        elif char in closed_brackets and (not stack or open_to_closed[stack.pop()] != char):
            # Closing bracket with no (or mismatched) opener.
            return False

    return len(stack) == 0
def main() -> None:
    """Read a bracket sequence from stdin and report whether it is balanced."""
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
| 684 | 1 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
__UpperCamelCase : List[str] = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    """Pipeline tests for zero-shot classification (pt and tf backends)."""

    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a pipeline around the provided model/tokenizer plus sample inputs."""
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]

    def run_pipeline_test(self, classifier, _):
        """Exercise the classifier's accepted input shapes and error handling."""
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)

    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        """Check `entailment_id` resolution for various label2id mappings."""
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        # Restore the original mapping so the classifier is unchanged afterwards.
        zero_shot_classifier.model.config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)

    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
        )

    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )

    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="tf",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            },
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            },
        )
| 710 | import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    """Compute a (height, width) close to `output_size` with both dims a multiple of `multiple`.

    Args:
        input_image: Image array whose current size is read via `get_image_size`.
        output_size: Target size, either an int (square) or a (height, width) pair.
        keep_aspect_ratio: If True, scale both dims by the factor closest to 1.
        multiple: Each output dimension is rounded to a multiple of this value.
    """

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        # Round to the nearest multiple, then clamp within [min_val, max_val]
        # by rounding down/up to a multiple instead.
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = ["""pixel_values"""]
def __init__( self : int , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : bool = False , UpperCamelCase__ : int = 1 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'''height''': 384, '''width''': 384}
SCREAMING_SNAKE_CASE : Any = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = do_resize
SCREAMING_SNAKE_CASE : Any = size
SCREAMING_SNAKE_CASE : str = keep_aspect_ratio
SCREAMING_SNAKE_CASE : List[str] = ensure_multiple_of
SCREAMING_SNAKE_CASE : int = resample
SCREAMING_SNAKE_CASE : Any = do_rescale
SCREAMING_SNAKE_CASE : List[Any] = rescale_factor
SCREAMING_SNAKE_CASE : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __A ( self : Optional[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : bool = False , UpperCamelCase__ : int = 1 , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE : Any = get_resize_output_image_size(
UpperCamelCase__ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=UpperCamelCase__ , multiple=UpperCamelCase__ , )
return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Dict , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ):
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Optional[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : str = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : List[Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : List[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : Dict = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Tuple = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : Dict = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : Any = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : Any = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Optional[int] = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Tuple = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
def __A ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Tuple] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = target_sizes.numpy()
SCREAMING_SNAKE_CASE : Optional[int] = []
for idx in range(len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE : List[str] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : List[Any] = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 34 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Module logger — main() below refers to it as `logger`.
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")

# Config classes supporting masked image modeling, and their model-type strings.
# BUGFIX: the mangled original assigned both values to `_lowerCAmelCase` and then
# referenced the undefined name `MODEL_CONFIG_CLASSES` (NameError at import).
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Reconstructed: the mangled original named this class `A_` (shadowed by two later
    classes of the same name), bound every field to one unannotated attribute, and used
    the undefined default `__lowercase`. Field names are pinned by main(), which reads
    `data_args.dataset_name`, `data_args.data_files`, `data_args.mask_patch_size`, etc.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6,
        metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Build the `data_files` mapping consumed by load_dataset() in main().
        # (The mangled original put this logic in a method named `_lowercase`,
        # so dataclasses never invoked it.)
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.

    Reconstructed: the mangled original named this class `A_` (shadowed), bound every
    field to one unannotated attribute, and used the undefined default `__lowercase`.
    Field names are pinned by main() (`model_args.model_name_or_path`, etc.).
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={
            # Inlined generator so this help string is correct even without a
            # module-level MODEL_TYPES constant.
            "help": "If training from scratch, pass a model type from the list: "
            + ", ".join(conf.model_type for conf in MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
        },
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )
class MaskGenerator:
    """
    Generate random boolean masks for SimMIM-style masked image modeling.

    Calling an instance returns a flat 0/1 tensor over model-patch positions where
    1 means "masked". Reconstructed: the mangled original declared duplicate
    parameter names (a SyntaxError), assigned attributes to throwaway locals, and
    used the undefined `SCREAMING_SNAKE_CASE__` as a dtype. Keyword names are
    pinned by the call in main(): MaskGenerator(input_size=..., mask_patch_size=...,
    model_patch_size=..., mask_ratio=...).
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        # Masking grid resolution, upscale factor to model-patch resolution,
        # total number of mask patches and how many of them to mask.
        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size
        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        # Pick `mask_count` random patches, mark them, then upscale the grid to
        # model-patch resolution and flatten.
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
def collate_fn(examples):
    """
    Stack per-example pixel values and boolean masks into batch tensors for the Trainer.

    BUGFIX: the mangled original assigned the stacked tensors to a throwaway local and
    returned the undefined names `pixel_values`/`mask`; its parameter was named
    `_lowerCamelCase` while the body read `examples`. Renamed from `lowerCamelCase_`
    (which the later main() definition shadowed) to the collator name used by Trainer.
    """
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    """
    Pretrain a model with masked image modeling (SimMIM-style).

    Reconstructed: the mangled original assigned every local to `_lowerCamelCase`
    while reading the intended names (`parser`, `ds`, `config`, ...), referenced the
    nonexistent `training_args.fpaa` (should be `fp16`), used an undefined lambda
    parameter in the RGB-conversion transform, and was named `lowerCamelCase_`
    although the `__main__` guard calls `main()`.
    """
    # Parse CLI args (or a single JSON file) into the three argument dataclasses.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config: CLI values win over the checkpoint's configuration.
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor_types = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = image_processor_types[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        # Attach transformed pixel values and one fresh random mask per image.
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for DDIMPipeline using a tiny randomly-initialized UNet.

    Reconstructed: the mangled original inherited from the undefined `__lowercase`
    (clearly PipelineTesterMixin, which `required_optional_params` below references),
    named every method `__A` (each shadowing the previous), and shared its class name
    with the integration-test class below.
    """

    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # NOTE(review): original attribute name lost in mangling (`a__ : str = False`) —
    # verify against PipelineTesterMixin's boolean flags.
    test_cpu_offload = False

    def get_dummy_components(self):
        """Tiny UNet + DDIM scheduler, seeded for reproducibility."""
        torch.manual_seed(0)
        unet = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        return {"unet": unet, "scheduler": scheduler}

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            # mps generators must be created via the global CPU entry point.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against pretrained google/ddpm checkpoints.

    Reconstructed: methods were both named `__A` (the second shadowed the first) and
    the device argument was the undefined `SCREAMING_SNAKE_CASE__` (should be the
    imported `torch_device`).
    """

    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"

        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 298 | 0 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
# BUGFIX: the functions below refer to `logger`, but the original bound the
# logger to the throwaway name `__a` (NameError at first use).
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
    """
    Dump the current git repository metadata (repo id, commit sha, branch) to
    `<folder_path>/git_log.json` for experiment reproducibility.

    NOTE(review): reconstructed from the mangled original, whose single parameter
    name stood in for both the folder path and the repo object —
    `search_parent_directories=True` and `repo_id: str(repo)` match the upstream
    distillation utils; verify against callers.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """
    Handle single-GPU, multi-GPU and multi-node setup, filling `params` in place with
    rank/world-size fields and the flags `is_master`, `multi_gpu`, `multi_node`.

    Reconstructed: the mangled original assigned every `params.*` attribute to a
    throwaway local (`lowercase__`), so nothing was ever stored on `params`, and all
    three helpers in this file shared the name `__UpperCamelCase`.
    """
    if params.n_gpu <= 0:
        # CPU-only run: single master process, no distributed setup.
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    prefix = f"--- Global rank: {params.global_rank} - "
    logger.info(prefix + "Number of nodes: %i" % params.n_nodes)
    logger.info(prefix + "Node ID : %i" % params.node_id)
    logger.info(prefix + "Local rank : %i" % params.local_rank)
    logger.info(prefix + "World size : %i" % params.world_size)
    logger.info(prefix + "GPUs per node : %i" % params.n_gpu_per_node)
    logger.info(prefix + "Master : %s" % str(params.is_master))
    logger.info(prefix + "Multi-node : %s" % str(params.multi_node))
    logger.info(prefix + "Multi-GPU : %s" % str(params.multi_gpu))
    logger.info(prefix + "Hostname : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )
def set_seed(args):
    """
    Seed numpy and torch (and all CUDA devices when `args.n_gpu > 0`) from `args.seed`
    for reproducible runs.

    BUGFIX: the mangled original named the parameter `UpperCAmelCase` while the body
    read `args` (NameError on every call), and shared its name with the other helpers.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
| 428 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
# Lazily-importable structure: submodule name -> list of public names it provides.
# BUGFIX: the mangled original assigned every optional entry to the throwaway name
# `__a` instead of extending `_import_structure`, then passed the undefined
# `_import_structure` to _LazyModule and never installed the proxy in sys.modules.
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]

if TYPE_CHECKING:
    # NOTE(review): these import names (SpeechaText*) differ from the lazy-structure
    # strings above (Speech2Text*) — names kept as in the source; verify against the
    # submodules' actual exports.
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
    from .processing_speech_to_text import SpeechaTextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import SpeechaTextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeechaTextForConditionalGeneration,
            TFSpeechaTextModel,
            TFSpeechaTextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechaTextForConditionalGeneration,
            SpeechaTextModel,
            SpeechaTextPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends are only imported
    # when their attributes are first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 428 | 1 |
class Node:
    """A node of a doubly linked list, holding `data` plus previous/next pointers.

    Renamed from the mangled `UpperCAmelCase_` — the list methods below construct
    nodes via `Node(...)`. BUGFIX: the original `__init__` assigned all three values
    to a throwaway local, so no attributes were ever set.
    """

    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous
class LinkedListIterator:
    """Forward iterator over the node values of a doubly linked list, starting at `head`.

    Renamed from the mangled `UpperCAmelCase_` — `LinkedList.__iter__` returns
    `LinkedListIterator(self.head)`. BUGFIX: the original never stored `self.current`
    and named its advance method `__lowercase` instead of `__next__`, so iteration
    failed immediately.
    """

    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        # Yield the current value, then advance to the next node.
        value = self.current.get_data()
        self.current = self.current.get_next()
        return value
class UpperCAmelCase_:
    """Doubly linked list with head/tail pointers.

    NOTE(review): in the incoming code every method was named ``__lowercase``
    (so only the last definition survived) and attribute writes bound locals
    instead of ``self``/``node`` fields. Method names below are restored from
    the call sites inside this class (``insert_before_node``, ``set_head``,
    ``get_node``, ``remove_node_pointers``, ...); ``delete_value`` and the two
    ``get_*_data`` accessors follow the conventional API — confirm against the
    original file.
    """

    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        """Space-separated node data from head to tail."""
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        """Data of the first node, or None for an empty list."""
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        """Data of the last node, or None for an empty list."""
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node) -> None:
        """Make `node` the new head (and tail too when the list is empty)."""
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node) -> None:
        """Make `node` the new tail (delegates to set_head on an empty list)."""
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value) -> None:
        """Append `value`, wrapped in a Node, at the tail."""
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node, node_to_insert) -> None:
        """Link `node_to_insert` immediately before `node`."""
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            # `node` was the head; the new node takes its place.
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self, node, node_to_insert) -> None:
        """Link `node_to_insert` immediately after `node`."""
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            # `node` was the tail; the new node takes its place.
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position(self, position, value) -> None:
        """Insert `value` at 1-based `position`; append when past the end."""
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item):
        """Return the first node whose data equals `item`; raise if absent."""
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value) -> None:
        """Unlink the first node carrying `value`, fixing head/tail as needed."""
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node) -> None:
        """Detach `node` from its neighbours and clear its own links."""
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None
def __UpperCAmelCase() -> None:
    """Do nothing; retained as an empty stub entry point."""


if __name__ == "__main__":
    # Run any doctests defined in this module when executed as a script.
    import doctest

    doctest.testmod()
| 14 | '''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase_ (snake_case__ ):
    """Test-suite for DDPMScheduler built on a shared scheduler test mixin.

    NOTE(review): this block arrived machine-mangled — every method is named
    `_A` (earlier definitions are shadowed, only the last survives), the base
    class name `snake_case__` is unresolved here, and many locals are bound to
    throwaway `_UpperCAmelCase` names while later lines read them under their
    original names (`config`, `scheduler`, `model`, `sample`, ...). Comments
    below describe the evident intent; the code itself is left untouched.
    """
    # Scheduler class(es) exercised by the shared check_over_* helpers.
    __UpperCamelCase: Union[str, Any] = (DDPMScheduler,)
    # get_scheduler_config: default config dict with per-test overrides.
    def _A ( self : Any , **A : List[str] ):
        _UpperCAmelCase : int = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0_001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        # NOTE(review): `config` is never defined — the dict above was bound to
        # `_UpperCAmelCase`, so this line raises NameError as written.
        config.update(**A )
        return config
    # Sweep num_train_timesteps values.
    def _A ( self : List[Any] ):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=A )
    # Sweep (beta_start, beta_end) pairs.
    def _A ( self : Union[str, Any] ):
        for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=A , beta_end=A )
    # Sweep beta schedules.
    def _A ( self : Optional[Any] ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=A )
    # Sweep variance types (including an invalid "other").
    def _A ( self : int ):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=A )
    # Toggle clip_sample.
    def _A ( self : Any ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=A )
    # Thresholding across thresholds and prediction types.
    def _A ( self : Union[str, Any] ):
        self.check_over_configs(thresholding=A )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=A , prediction_type=A , sample_max_value=A , )
    # Sweep prediction types.
    def _A ( self : List[str] ):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=A )
    # Check the forward pass at a few timesteps.
    def _A ( self : Union[str, Any] ):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=A )
    # Variance values at the start, middle and end of the schedule.
    def _A ( self : Tuple ):
        _UpperCAmelCase : Union[str, Any] = self.scheduler_classes[0]
        _UpperCAmelCase : List[Any] = self.get_scheduler_config()
        # NOTE(review): `scheduler_class`/`A`/`scheduler` below read names the
        # mangled assignments above never actually bound.
        _UpperCAmelCase : int = scheduler_class(**A )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00_979 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
    # Full reverse-diffusion loop (epsilon prediction) against known sums.
    def _A ( self : List[Any] ):
        _UpperCAmelCase : Optional[Any] = self.scheduler_classes[0]
        _UpperCAmelCase : Optional[Any] = self.get_scheduler_config()
        _UpperCAmelCase : int = scheduler_class(**A )
        _UpperCAmelCase : Optional[Any] = len(A )
        _UpperCAmelCase : List[Any] = self.dummy_model()
        _UpperCAmelCase : List[str] = self.dummy_sample_deter
        _UpperCAmelCase : List[str] = torch.manual_seed(0 )
        for t in reversed(range(A ) ):
            # 1. predict noise residual
            _UpperCAmelCase : List[Any] = model(A , A )
            # 2. predict previous mean of sample x_t-1
            _UpperCAmelCase : List[Any] = scheduler.step(A , A , A , generator=A ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            _UpperCAmelCase : Any = pred_prev_sample
        _UpperCAmelCase : str = torch.sum(torch.abs(A ) )
        _UpperCAmelCase : Tuple = torch.mean(torch.abs(A ) )
        assert abs(result_sum.item() - 258.9_606 ) < 1E-2
        assert abs(result_mean.item() - 0.3_372 ) < 1E-3
    # Full loop again with v_prediction; different reference sums.
    def _A ( self : Union[str, Any] ):
        _UpperCAmelCase : List[Any] = self.scheduler_classes[0]
        _UpperCAmelCase : Dict = self.get_scheduler_config(prediction_type="v_prediction" )
        _UpperCAmelCase : Optional[int] = scheduler_class(**A )
        _UpperCAmelCase : Union[str, Any] = len(A )
        _UpperCAmelCase : Optional[int] = self.dummy_model()
        _UpperCAmelCase : Optional[Any] = self.dummy_sample_deter
        _UpperCAmelCase : List[Any] = torch.manual_seed(0 )
        for t in reversed(range(A ) ):
            # 1. predict noise residual
            _UpperCAmelCase : Tuple = model(A , A )
            # 2. predict previous mean of sample x_t-1
            _UpperCAmelCase : List[Any] = scheduler.step(A , A , A , generator=A ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            _UpperCAmelCase : Tuple = pred_prev_sample
        _UpperCAmelCase : List[str] = torch.sum(torch.abs(A ) )
        _UpperCAmelCase : int = torch.mean(torch.abs(A ) )
        assert abs(result_sum.item() - 202.0_296 ) < 1E-2
        assert abs(result_mean.item() - 0.2_631 ) < 1E-3
    # Custom (descending) timestep lists round-trip through previous_timestep.
    def _A ( self : Optional[Any] ):
        _UpperCAmelCase : Optional[Any] = self.scheduler_classes[0]
        _UpperCAmelCase : Optional[int] = self.get_scheduler_config()
        _UpperCAmelCase : int = scheduler_class(**A )
        _UpperCAmelCase : Any = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=A )
        _UpperCAmelCase : Optional[Any] = scheduler.timesteps
        for i, timestep in enumerate(A ):
            if i == len(A ) - 1:
                _UpperCAmelCase : int = -1
            else:
                _UpperCAmelCase : str = timesteps[i + 1]
            _UpperCAmelCase : Any = scheduler.previous_timestep(A )
            _UpperCAmelCase : Optional[Any] = prev_t.item()
            self.assertEqual(A , A )
    # Non-descending custom timesteps must be rejected.
    def _A ( self : Optional[int] ):
        _UpperCAmelCase : List[Any] = self.scheduler_classes[0]
        _UpperCAmelCase : Union[str, Any] = self.get_scheduler_config()
        _UpperCAmelCase : Optional[Any] = scheduler_class(**A )
        _UpperCAmelCase : Optional[int] = [100, 87, 50, 51, 0]
        with self.assertRaises(A , msg="`custom_timesteps` must be in descending order." ):
            scheduler.set_timesteps(timesteps=A )
    # Passing both num_inference_steps and custom timesteps must be rejected.
    def _A ( self : Dict ):
        _UpperCAmelCase : Optional[int] = self.scheduler_classes[0]
        _UpperCAmelCase : Tuple = self.get_scheduler_config()
        _UpperCAmelCase : str = scheduler_class(**A )
        _UpperCAmelCase : str = [100, 87, 50, 1, 0]
        _UpperCAmelCase : Tuple = len(A )
        with self.assertRaises(A , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
            scheduler.set_timesteps(num_inference_steps=A , timesteps=A )
    # Timesteps >= num_train_timesteps must be rejected.
    # NOTE(review): the msg string below looks like it was meant to be an
    # f-string (unevaluated `{scheduler.config...}` and a doubled brace).
    def _A ( self : List[str] ):
        _UpperCAmelCase : List[str] = self.scheduler_classes[0]
        _UpperCAmelCase : str = self.get_scheduler_config()
        _UpperCAmelCase : int = scheduler_class(**A )
        _UpperCAmelCase : List[Any] = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            A , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ):
            scheduler.set_timesteps(timesteps=A )
| 244 | 0 |
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
# Substring-substitution tables used by rename_state_dict_key to translate
# TF BigBirdPegasus variable names into HF state-dict keys.
# NOTE(review): the incoming code assigned all five tables to the same `_a`
# name; the names below are restored from their read sites
# (INIT_COMMON/END_COMMON in the pattern concatenations, DECODER_PATTERNS,
# REMAINING_PATTERNS and KEYS_TO_IGNORE in the conversion loops).
INIT_COMMON = [
    # tf -> hf
    ("/", "."),
    ("layer_", "layers."),
    ("kernel", "weight"),
    ("beta", "bias"),
    ("gamma", "weight"),
    ("pegasus", "model"),
]
END_COMMON = [
    (".output.dense", ".fc2"),
    ("intermediate.LayerNorm", "final_layer_norm"),
    ("intermediate.dense", "fc1"),
]

# Patterns applied to decoder weights (self- and cross-attention renames).
DECODER_PATTERNS = (
    INIT_COMMON
    + [
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.out_proj"),
        ("attention.self", "self_attn"),
        ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
        ("attention.encdec_output.dense", "encoder_attn.out_proj"),
        ("attention.encdec", "encoder_attn"),
        ("key", "k_proj"),
        ("value", "v_proj"),
        ("query", "q_proj"),
        ("decoder.LayerNorm", "decoder.layernorm_embedding"),
    ]
    + END_COMMON
)

# Patterns applied to all non-decoder (encoder/embedding) weights.
REMAINING_PATTERNS = (
    INIT_COMMON
    + [
        ("embeddings.word_embeddings", "shared.weight"),
        ("embeddings.position_embeddings", "embed_positions.weight"),
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.output"),
        ("attention.self", "self_attn.self"),
        ("encoder.LayerNorm", "encoder.layernorm_embedding"),
    ]
    + END_COMMON
)

# TF variables that have no HF counterpart and are skipped entirely.
KEYS_TO_IGNORE = [
    "encdec/key/bias",
    "encdec/query/bias",
    "encdec/value/bias",
    "self/key/bias",
    "self/query/bias",
    "self/value/bias",
    "encdec_output/dense/bias",
    "attention/output/dense/bias",
]
def rename_state_dict_key(k, patterns):
    """Translate one TF variable name into an HF state-dict key.

    Applies each (tf_name, hf_name) substring substitution in `patterns`
    in order and returns the rewritten key.
    NOTE(review): the incoming def had two identical parameter names (a
    SyntaxError); names restored from the call sites in the conversion loops.
    """
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    """Map TF BigBirdPegasus weights onto a freshly initialised HF model.

    tf_weights: {tf_variable_name: numpy array} as produced by
        get_tf_weights_as_numpy.
    config_update: overrides applied on top of the default config.
    Returns the populated BigBirdPegasusForConditionalGeneration.
    Raises ValueError when a renamed key has no match in the model state dict.
    """
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        # Dense/attention projection kernels are stored transposed in TF.
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        # Position embeddings are handled separately below.
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    # The single TF position-embedding table is shared by encoder and decoder.
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    # These keys are tied/derived weights and are expected to be "missing".
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> dict:
    """Load every variable of a TF checkpoint into a {name: numpy array} dict.

    Bookkeeping variables (anything whose name contains "global_step") are
    skipped.
    """
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path, save_dir, config_update: dict) -> None:
    """Convert a BigBirdPegasus TF checkpoint and save it as an HF PyTorch model.

    ckpt_path: TF checkpoint prefix (passed to tf.train.list_variables).
    save_dir: output directory for save_pretrained.
    config_update: overrides applied on top of the default config.
    """
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    # No config overrides by default; extend this dict to tweak the converted
    # model. NOTE(review): the incoming code bound this dict to a throwaway
    # name while the call below read `config_update` (a NameError).
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 10 | '''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
# Load the iris dataset and hold out a test split.
# NOTE(review): the incoming code assigned every value to the same `_a` name;
# the names below are restored from their read sites (X/y at the split call,
# X_train/y_train/classes in the __main__ demo).
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    """Return the Euclidean distance between two points (any sequences)."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` by majority vote among its k nearest neighbours.

    train_data: training points; train_target: their class indices.
    classes: index -> class-name lookup. Returns the winning class name.
    NOTE(review): both incoming defs had duplicate parameter names (a
    SyntaxError); names restored from the demo call in __main__.
    """
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
    # Classify one sample iris measurement when run as a script.
    query_point = [4.4, 3.1, 1.3, 1.4]
    print(classifier(X_train, y_train, classes, query_point))
| 10 | 1 |
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a, b, c) -> tuple[complex, complex]:
    """Return both roots of a*x^2 + b*x + c = 0.

    Real roots are returned as plain floats; complex roots are returned as
    complex numbers. Raises ValueError when `a` is zero.
    NOTE(review): the incoming def had three identical parameter names (a
    SyntaxError); names restored from the keyword call in main().
    """
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    # cmath.sqrt handles a negative discriminant by returning a complex value.
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
def main() -> None:
    """Solve 5x^2 + 6x + 1 = 0 and print both roots.

    NOTE(review): the incoming code unpacked both roots into the same mangled
    name and interpolated it twice; restored to two distinct solutions.
    """
    solution1, solution2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution1} and {solution2}")


if __name__ == "__main__":
    main()
| 77 | """simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class lowerCamelCase(_SCREAMING_SNAKE_CASE):
    """Code-quality checks over every dataset script under ./datasets.

    NOTE(review): all four incoming methods shared one mangled name; the two
    helper names are restored from their `self.` call sites, and the test
    methods are given the conventional `test_` prefix — confirm against the
    original file.
    """

    def _no_encoding_on_file_open(self, filepath: str):
        """Return a match for an `open(...)` call missing an explicit encoding
        (binary and write modes are exempt), or None."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_code = input_file.read()
            match = regexp.search(input_code)
        return match

    def _no_print_statements(self, filepath: str):
        """Return the first real `print(` call (comments/docstrings ignored),
        or None."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_code = input_file.read()
        # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
        matches = regexp.finditer(input_code)
        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 159 | 0 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    """Acquiring an already-held lock must raise Timeout after the deadline.

    NOTE(review): the incoming code read `tmpdir` while its parameter was
    mangled, and passed the mangled name to pytest.raises; `Timeout` is the
    exception imported at the top of this file.
    """
    lock_1 = FileLock(str(tmpdir / "foo.lock"))
    lock_2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock_1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock_2.acquire(timeout)
        # The second acquire must have waited at least the full timeout.
        assert time.time() - _start > timeout
def test_long_path(tmpdir):
    """Over-long lock filenames must be shortened to a valid (<=255 char) path
    while keeping the .lock suffix, and locking must still work."""
    filename = "a" * 1000 + ".lock"
    lock_1 = FileLock(str(tmpdir / filename))
    assert lock_1._lock_file.endswith(".lock")
    # The 1000-character name cannot survive verbatim.
    assert not lock_1._lock_file.endswith(filename)
    assert len(os.path.basename(lock_1._lock_file)) <= 255
    lock_2 = FileLock(tmpdir / filename)
    with lock_1.acquire():
        with pytest.raises(Timeout):
            lock_2.acquire(0)
| 359 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

# NOTE(review): both module constants were assigned to the same name `A`
# (the map clobbered the logger); names restored per the transformers
# convention for this module — confirm against the original file.
GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class SCREAMING_SNAKE_CASE__(UpperCAmelCase__):
    """Configuration for GPT-Neo models.

    Stores the hyper-parameters and validates that `attention_types` expands
    to exactly `num_layers` attention layers.
    NOTE(review): parameter/attribute names were reconstructed from their use
    sites (`self.expand_attention_types_params`, `self.attention_layers`,
    `self.num_layers`) and the upstream transformers convention; the incoming
    __init__ had all-identical parameter names (a SyntaxError) and bound its
    arguments to locals instead of `self`.
    """

    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map generic config attribute names onto this model's field names.
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        """Expand [[types, repeat], ...] into a flat per-layer type list."""
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions


# NOTE(review): canonical public name for this class — confirm against the
# original file.
GPTNeoConfig = SCREAMING_SNAKE_CASE__
def custom_unfold(input, dimension: int, size: int, step: int):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX.

    Produces the same (window-axis-last) layout as `input.unfold(dimension,
    size, step)` using only ONNX-exportable ops.
    NOTE(review): the incoming def had four identical parameter names (a
    SyntaxError); names restored from the body (`input.size()`,
    `shape[dimension]`, `sizedim - size`).
    """
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    # Start index of every window along `dimension`, then the full index grid.
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[tuple(s)]

    # Move the window axis to the end, matching Tensor.unfold's layout.
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Largest divisor of `seq_length` smaller than `window_size`, plus the
    resulting number of blocks.

    NOTE(review): the incoming def had two identical parameter names (a
    SyntaxError); names restored from the body (`remainders`, `candidates`,
    `divisor_indices`).
    """
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class SCREAMING_SNAKE_CASE__(UpperCAmelCase__):
    """ONNX export configuration for GPT-Neo (supports past-key-values).

    NOTE(review): all incoming methods shared one mangled name and locals were
    never bound; names restored from the body (`ordered_inputs`,
    `common_inputs`, `self.num_attention_heads`, the `generate_dummy_inputs`
    super call) and the OnnxConfigWithPast contract.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis specification for the exported model inputs."""
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework=None,
    ) -> Mapping[str, Any]:
        """Build dummy inputs (plus zero-filled past states) for ONNX tracing."""
        common_inputs = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            # Extend the mask to cover the past positions as well.
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13


# NOTE(review): canonical public name for this class — confirm against the
# original file.
GPTNeoOnnxConfig = SCREAMING_SNAKE_CASE__
| 359 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
# Module logger. NOTE(review): the incoming code bound this to a throwaway
# name `_A`; `logger` is the conventional name for it.
logger = logging.get_logger(__name__)
class __snake_case(__SCREAMING_SNAKE_CASE):
    """Image processor: shortest-edge resize, center crop, rescale, and RGB->BGR
    channel flip (the pretrained checkpoints expect BGR input).

    NOTE(review): all incoming methods shared one mangled name and attribute
    writes bound locals instead of `self`; `resize`/`center_crop`/`rescale`/
    `flip_channel_order` are restored from their `self.` call sites in
    `preprocess`; `preprocess` and `post_process_semantic_segmentation` follow
    the BaseImageProcessor convention — confirm against the original file.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(self, image, size, resample=PIL.Image.BILINEAR, data_format=None, **kwargs):
        """Resize so the shortest edge equals size["shortest_edge"]."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop to (size["height"], size["width"])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(self, image, data_format=None):
        """Swap the channel order (RGB <-> BGR)."""
        return flip_channel_order(image, data_format=data_format)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_center_crop=None,
        crop_size=None,
        do_flip_channel_order=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the configured pipeline over one image or a batch of images and
        return a BatchFeature with "pixel_values"."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        """Turn model logits into per-image segmentation maps, optionally
        resized to `target_sizes`."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(target_sizes)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
| 100 |
from __future__ import annotations
from typing import Any
class UpperCAmelCase__:
    """Circular FIFO queue backed by a fixed ring of doubly linked nodes.

    An empty slot is a node whose ``data`` is ``None``; ``front`` points at the
    next item to dequeue and ``rear`` at the most recently enqueued item.
    """

    def __init__(self, initial_capacity: int = 6) -> None:
        # front/rear both start on the same (empty) node of the ring.
        self.front: "Node | None" = None
        self.rear: "Node | None" = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        """Build a circular, doubly linked ring of ``initial_capacity`` empty nodes."""
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # Close the ring: the last node links back to the first.
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        """Return True when the queue holds no data (front meets rear on an empty node)."""
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self):
        """Return the oldest queued value without removing it.

        Raises:
            Exception: if the queue is empty.
        """
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data) -> None:
        """Store ``data`` in the next free slot of the ring.

        Raises:
            Exception: if the ring is full.
        """
        if self.rear is None:
            return
        self.check_is_full()
        # An empty queue writes into the slot rear already occupies;
        # otherwise rear advances to the next free node first.
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self):
        """Remove and return the oldest queued value.

        Raises:
            Exception: if the queue is empty.
        """
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            # Single occupied slot: clear it in place without moving front.
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        """Raise when a read operation is attempted on an empty queue."""
        if self.is_empty():
            raise Exception('Empty Queue')

    def check_is_full(self) -> None:
        """Raise when a write operation is attempted on a full ring."""
        if self.rear and self.rear.next == self.front:
            raise Exception('Full Queue')


class Node:
    """A single slot of the ring: a payload plus next/prev links."""

    def __init__(self) -> None:
        self.data = None
        self.next: "Node | None" = None
        self.prev: "Node | None" = None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def a_(args):
    """Factory used by argparse dispatch: build a ``DownloadCommand`` from parsed CLI args."""
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    """CLI command that pre-downloads a model (weights + tokenizer) into the local cache."""

    @staticmethod
    def register_subcommand(parser):
        """Attach the ``download`` sub-command and its options to ``parser``."""
        download_parser = parser.add_parser("""download""")
        download_parser.add_argument(
            """--cache-dir""", type=str, default=None, help="""Path to location to store the models""")
        download_parser.add_argument(
            """--force""", action="""store_true""", help="""Force the model to be download even if already in cache-dir""")
        download_parser.add_argument(
            """--trust-remote-code""", action="""store_true""", help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine""", )
        download_parser.add_argument("""model""", type=str, help="""Name of the model to download""")
        # Dispatch back through the factory so ``args.func(args)`` builds this command.
        download_parser.set_defaults(func=a_)

    def __init__(self, model, cache, force, trust_remote_code):
        # Stored verbatim; used when run() talks to the Hub.
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        """Download the model weights and tokenizer files into the cache directory."""
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
@dataclass
class lowerCamelCase(BenchmarkArguments):
    """PyTorch-specific benchmark arguments (device selection, TorchScript, AMP opt level)."""

    # Deprecated negative flags still accepted for backward compatibility;
    # each maps onto the positive field obtained by stripping the 'no_' prefix.
    deprecated_args = [
        'no_inference',
        'no_cuda',
        'no_tpu',
        'no_speed',
        'no_memory',
        'no_env_print',
        'no_multi_process',
    ]

    def __init__(self, **kwargs):
        """Translate deprecated ``no_*`` flags into their positive counterparts,
        pop the PyTorch-only options, then delegate to ``BenchmarkArguments``."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                # 'no_foo' -> 'foo', with the boolean inverted.
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                # Read back via getattr: the deprecated key was just popped,
                # so kwargs no longer holds a value for positive_arg.
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={getattr(self, positive_arg)}")

        self.torchscript = kwargs.pop("""torchscript""", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("""torch_xla_tpu_print_metrics""", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("""fp16_opt_level""", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={'help': 'Trace the models using torchscript'})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={'help': 'Print Xla/PyTorch tpu metrics'})
    fp16_opt_level: str = field(
        default='O1', metadata={
            'help': (
                'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
                'See details at https://nvidia.github.io/apex/amp.html'
            )
        }, )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        """Pick the torch device (CPU / TPU / CUDA) and count visible GPUs, once."""
        requires_backends(self, ["""torch"""])
        logger.info("""PyTorch: setting up devices""")
        if not self.cuda:
            device = torch.device("""cpu""")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        # True only when a TPU runtime is importable AND the user enabled it.
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["""torch"""])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["""torch"""])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["""torch"""])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
| 164 | 0 |
'''simple docstring'''
def mf_knapsack(i, wt, val, j):
    """Memory-function (memoized, top-down) 0/1 knapsack.

    Args:
        i: number of items considered.
        wt: item weights.
        val: item values.
        j: remaining capacity.

    Returns:
        Best value achievable with the first ``i`` items within capacity ``j``.
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            # Item i does not fit: inherit the best of the first i-1 items.
            val_ = mf_knapsack(i - 1, wt, val, j)
        else:
            # Best of skipping item i vs. taking it.
            val_ = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val_
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack.

    Returns:
        ``(optimal value, full dp table)`` for ``n`` items and capacity ``w``.
    """
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    # Index with w (not the leaked loop variable) so w == 0 also works.
    return dp[n][w], dp


def knapsack_with_example_solution(w, wt, val):
    """Solve 0/1 knapsack and also reconstruct one optimal item subset.

    Args:
        w: knapsack capacity.
        wt: item weights (list/tuple of ints).
        val: item values (list/tuple).

    Raises:
        ValueError: if ``wt``/``val`` are not lists/tuples or differ in length.
        TypeError: if any weight is not an integer.

    Returns:
        ``(optimal value, set of 1-based indices of chosen items)``.
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            'Both the weights and values vectors must be either lists or tuples')

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            'The number of weights must be the same as the number of values.\n'
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                'All weights must be integers but got weight of '
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    """Walk the dp table backwards, adding item ``i`` whenever it was taken.

    If ``dp[i-1][j] == dp[i][j]`` item i contributed nothing and was skipped;
    otherwise it was taken, so recurse with the remaining capacity.
    """
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print('optimal_value = ', optimal_solution)
    print('An optimal subset corresponding to the optimal value', optimal_subset)
| 446 |
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + '''/user'''
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get('''USER_TOKEN''', '''''')


def fetch_github_info(auth_token):
    """Fetch the authenticated user's profile from the GitHub REST API.

    Args:
        auth_token: a GitHub personal access token.

    Returns:
        The parsed JSON response (a dict) for the authenticated user.
    """
    headers = {
        """Authorization""": F'''token {auth_token}''',
        """Accept""": """application/vnd.github.v3+json""",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"""{key}: {value}""")
    else:
        raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 214 | 0 |
'''simple docstring'''
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def test_mockfs(mockfs):
    """The ``mockfs`` fixture registers the mock protocol in fsspec's registry."""
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    """Without the fixture, the mock protocol must not be registered."""
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    """``extract_path_from_uri`` strips the protocol from remote URIs and
    leaves local paths untouched."""
    mock_bucket = "mock-s3-bucket"
    dataset_path = F's3://{mock_bucket}'
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False


@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(
    compression_fs_class, gz_file, bza_file, lza_file, zstd_file, xz_file, text_file
):
    # NOTE(review): fixture names assumed to match the project conftest — confirm.
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        # Optional codec not installed: skip with the requirement's reason.
        reason = F'for \'{compression_fs_class.protocol}\' compression protocol, '
        if compression_fs_class.protocol == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()


@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    """A member file inside a zip/gzip archive is visible via chained URLs."""
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = F'{protocol}://{member_file_path}::{compressed_file_path}'
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)


@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()


def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == F'A filesystem protocol was already set for {protocol} and will be overwritten.'
    )
| 720 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import table: maps submodule name -> public names it provides.
_import_structure = {
    """configuration_groupvit""": [
        """GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """GroupViTConfig""",
        """GroupViTOnnxConfig""",
        """GroupViTTextConfig""",
        """GroupViTVisionConfig""",
    ],
}

# PyTorch models are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_groupvit"""] = [
        """GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """GroupViTModel""",
        """GroupViTPreTrainedModel""",
        """GroupViTTextModel""",
        """GroupViTVisionModel""",
    ]

# TensorFlow models are only registered when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_tf_groupvit"""] = [
        """TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFGroupViTModel""",
        """TFGroupViTPreTrainedModel""",
        """TFGroupViTTextModel""",
        """TFGroupViTVisionModel""",
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 550 | 0 |
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class _UpperCAmelCase(unittest.TestCase):
    """Integration tests for the BetterTransformer <-> transformers round trip."""

    def test_transform_and_reverse(self):
        """to_bettertransformer / reverse_bettertransformer round-trips the model,
        and a save + reload of the reversed model generates identical output."""
        model_id = 'hf-internal-testing/tiny-random-t5'
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id)

        inp = tokenizer('This is me', return_tensors='pt')

        model = model.to_bettertransformer()
        self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeqaSeqLM.from_pretrained(tmpdirname)

            self.assertFalse(
                any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        """save_pretrained must refuse to serialize a model still in
        BetterTransformer form; it works again after reversing."""
        model_id = 'hf-internal-testing/tiny-random-t5'
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id)

        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            # NOTE(review): assumed ValueError — confirm against transformers' save_pretrained guard.
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 53 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
# Shared fixtures: the small Lena image in BGR and its grayscale version.
img = imread(R'digital_image_processing/image_data/lena_small.jpg')
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open('digital_image_processing/image_data/lena_small.jpg') as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            '<PIL.Image.Image image mode=RGB size=100x100 at')


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread('digital_image_processing/image_data/lena_small.jpg', 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uinta)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg", ):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = 'digital_image_processing/image_data/lena.jpg'

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
| 53 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """Output of ``TransformerTemporalModel``: ``sample`` holds the hidden states."""

    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    """A transformer that attends over the temporal (frame) axis of a video latent."""

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ])

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """Attend across frames.

        ``hidden_states`` is laid out as (batch * num_frames, channel, height,
        width); it is regrouped so each spatial location attends over its
        ``num_frames`` timesteps, then restored and added to the residual.
        """
        # 1. Input
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        # (batch*height*width, num_frames, channel): frames become the sequence axis.
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
| 712 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCAmelCase__ = 3
def primitive_root(p_val: int) -> int:
    """Return a randomly chosen candidate primitive root modulo ``p_val``.

    Rejects candidates g with g^2 ≡ 1 or g^p ≡ 1 (mod p) and retries.
    """
    print('Generating primitive root of p')
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    """Generate an ElGamal (public, private) key pair of ``key_size`` bits."""
    print('Generating prime p...')
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    """Write ``<name>_pubkey.txt`` / ``<name>_privkey.txt``; abort if either exists."""
    if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'):
        print('\nWARNING:')
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            'Use a different name or delete these files and re-run this program.')
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f'\nWriting public key to file {name}_pubkey.txt...')
    with open(f'{name}_pubkey.txt', 'w') as fo:
        fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}')

    print(f'Writing private key to file {name}_privkey.txt...')
    with open(f'{name}_privkey.txt', 'w') as fo:
        fo.write(f'{private_key[0]},{private_key[1]}')


def main() -> None:
    print('Making key files...')
    make_key_files('elgamal', 2_048)
    print('Key files generation successful')


if __name__ == "__main__":
    main()
| 6 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import table: maps submodule name -> public names it provides.
_import_structure = {
    '''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}

# PyTorch models are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_timesformer'''] = [
        '''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TimesformerModel''',
        '''TimesformerForVideoClassification''',
        '''TimesformerPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 319 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[Any] = (DEISMultistepScheduler,)
A : str = (('num_inference_steps', 25),)
def __lowerCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[Any] = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
}
config.update(**SCREAMING_SNAKE_CASE__ )
return config
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__=0 , **SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[Any] = dict(self.forward_default_kwargs )
lowercase : int = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = self.dummy_sample
lowercase : List[Any] = 0.1 * sample
lowercase : Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase : List[str] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals
lowercase : Dict = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals
lowercase : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase , lowercase : Optional[Any] = sample, sample
for t in range(SCREAMING_SNAKE_CASE__ , time_step + scheduler.config.solver_order + 1 ):
lowercase : Union[str, Any] = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
lowercase : Any = new_scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__=0 , **SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[Any] = dict(self.forward_default_kwargs )
lowercase : Any = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE__ )
lowercase : str = self.dummy_sample
lowercase : Dict = 0.1 * sample
lowercase : Any = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase : Optional[int] = self.get_scheduler_config()
lowercase : str = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals (must be after setting timesteps)
lowercase : Dict = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE__ )
lowercase : Dict = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residual (must be after setting timesteps)
lowercase : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase : Union[str, Any] = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
lowercase : Dict = new_scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ ):
if scheduler is None:
lowercase : Optional[int] = self.scheduler_classes[0]
lowercase : Optional[int] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
lowercase : int = scheduler_class(**SCREAMING_SNAKE_CASE__ )
lowercase : int = self.scheduler_classes[0]
lowercase : Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
lowercase : Dict = scheduler_class(**SCREAMING_SNAKE_CASE__ )
lowercase : str = 10
lowercase : str = self.dummy_model()
lowercase : Optional[Any] = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(scheduler.timesteps ):
lowercase : Optional[int] = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : int = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample
return sample
def __lowerCamelCase ( self ):
lowercase : List[Any] = dict(self.forward_default_kwargs )
lowercase : Optional[Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE__ )
for scheduler_class in self.scheduler_classes:
lowercase : Union[str, Any] = self.get_scheduler_config()
lowercase : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = self.dummy_sample
lowercase : Optional[int] = 0.1 * sample
if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE__ , '''set_timesteps''' ):
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE__ , '''set_timesteps''' ):
lowercase : List[str] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase : int = [residual + 0.2, residual + 0.15, residual + 0.10]
lowercase : Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
lowercase : Any = scheduler.timesteps[5]
lowercase : Dict = scheduler.timesteps[6]
lowercase : int = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
lowercase : List[str] = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __lowerCamelCase ( self ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
lowercase : Any = DEISMultistepScheduler(**self.get_scheduler_config() )
lowercase : List[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.23916 ) < 1E-3
lowercase : Any = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowercase : str = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowercase : Any = UniPCMultistepScheduler.from_config(scheduler.config )
lowercase : Tuple = DEISMultistepScheduler.from_config(scheduler.config )
lowercase : Any = self.full_loop(scheduler=SCREAMING_SNAKE_CASE__ )
lowercase : Any = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.23916 ) < 1E-3
def __lowerCamelCase ( self ):
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE__ )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , sample_max_value=SCREAMING_SNAKE_CASE__ , algorithm_type='''deis''' , solver_order=SCREAMING_SNAKE_CASE__ , solver_type=SCREAMING_SNAKE_CASE__ , )
def __lowerCamelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=SCREAMING_SNAKE_CASE__ , solver_type=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , algorithm_type=SCREAMING_SNAKE_CASE__ , )
lowercase : Optional[int] = self.full_loop(
solver_order=SCREAMING_SNAKE_CASE__ , solver_type=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , algorithm_type=SCREAMING_SNAKE_CASE__ , )
assert not torch.isnan(SCREAMING_SNAKE_CASE__ ).any(), "Samples have nan numbers"
def __lowerCamelCase ( self ):
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE__ )
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE__ , time_step=0 )
def __lowerCamelCase ( self ):
lowercase : List[Any] = self.full_loop()
lowercase : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.23916 ) < 1E-3
def __lowerCamelCase ( self ):
lowercase : Union[str, Any] = self.full_loop(prediction_type='''v_prediction''' )
lowercase : Optional[int] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.091 ) < 1E-3
def __lowerCamelCase ( self ):
lowercase : Optional[int] = self.scheduler_classes[0]
lowercase : Any = self.get_scheduler_config(thresholding=SCREAMING_SNAKE_CASE__ , dynamic_thresholding_ratio=0 )
lowercase : str = scheduler_class(**SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = 10
lowercase : Optional[int] = self.dummy_model()
lowercase : Tuple = self.dummy_sample_deter.half()
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(scheduler.timesteps ):
lowercase : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Any = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample
assert sample.dtype == torch.floataa
| 319 | 1 |
'''simple docstring'''
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
# (Fix: three distinct constants were mangled into one repeatedly-reassigned
# name; the functions below read TRANSFORMERS_PATH and PATH_TO_DOCS.)
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def _lowerCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : int ):
with open(lowerCamelCase_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__lowercase = f.readlines()
# Find the start prompt.
__lowercase = 0
while not lines[start_index].startswith(lowerCamelCase_ ):
start_index += 1
start_index += 1
__lowercase = start_index
while not lines[end_index].startswith(lowerCamelCase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names. (Fix: all four constants were
# mangled into one repeatedly-reassigned name, leaving `_re_tf_models` etc.
# undefined where the table builder reads them.)
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a CamelCase `identifier` into its component words.

    Consecutive capitals stay together until a lowercase letter follows
    (e.g. "TFBertModel" -> ["TF", "Bert", "Model"]).
    (Fix: the original bound the matches to a throwaway name and then read
    an undefined `matches`.)
    """
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : int ):
__lowercase = 2 if text == '''✅''' or text == '''❌''' else len(lowerCamelCase_ )
__lowercase = (width - text_length) // 2
__lowercase = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Build the markdown model-support table from the transformers auto modules.

    One row per model name; columns flag slow/fast tokenizer and
    PyTorch/TensorFlow/Flax backend support.
    """
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}
    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])
    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2
    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"
    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    """Check the auto-generated model table in `index.md` is up to date.

    When `overwrite` is True the table is rewritten in place; otherwise a
    stale table raises a ValueError telling the user to run `make fix-copies`.
    """
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()
    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." )
if __name__ == "__main__":
    # CLI entry point. (Fix: the parsed namespace was assigned to a mangled
    # name while the call below read an undefined `args`.)
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_model_table(args.fix_and_overwrite)
| 700 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_SCREAMING_SNAKE_CASE = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    """Fast CPU tests for `VQDiffusionPipeline` built from tiny dummy components.

    Fixes: the original class and its second sibling shared the mangled name
    `__lowercase` (so one shadowed the other), every member was named
    `_UpperCAmelCase` (each definition overwrote the previous), and every
    local assignment went to a throwaway name while later lines read the
    real, undefined identifiers. Names restored throughout.
    """

    def tearDown(self):
        # Release references and free any cached GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        # Size of the VQ codebook used by the dummy VQ-VAE and transformer.
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = TransformeraDModel(**model_kwargs)
        return model

    def test_vq_diffusion(self):
        device = "cpu"
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)
        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "teddy bear playing in the pool"
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images
        # Re-seed so the tuple-return path sees the same randomness.
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )
        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "teddy bear playing in the pool"
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])
        # NOTE(review): the 2.0 tolerance below is copied from the original; it
        # is far looser than the 1e-2 used elsewhere — TODO confirm intent.
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against the released `microsoft/vq-diffusion-ithq` checkpoint."""

    def tearDown(self):
        # Release references and free cached GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )
        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
| 56 | 0 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
    """Builds tiny UMT5 configs/inputs and shared checks for the model tests.

    Fix: the class was mangled to `SCREAMING_SNAKE_CASE__` while the test
    class below instantiates `UMTaModelTester`; all methods shared one name
    and every local went to the throwaway `A`. Names restored.
    """

    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        return TaConfig.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        """Fill in default masks for any that were not provided."""
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        # NOTE(review): the mangled original only read `config.num_attention_heads`
        # here; restored as an encoder_attention_heads mirror — TODO confirm upstream.
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return TaConfig(
            vocab_size=166,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return TaConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        """Check output shapes and past-key-value layout of a forward pass."""
        model = UMTaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        """Check cached (past_key_values) decoding matches uncached decoding."""
        model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fpaa_forward(
        self,
        config,
        input_dict,
    ):
        """A half-precision forward pass must not produce NaNs."""
        model = UMTaModel(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-mixin-driven tests for the UMT5 PyTorch models.

    Fix: class and method names restored from mangled duplicates; all locals
    were assigned to the throwaway `A` while later lines read undefined names.
    """

    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMTaForConditionalGeneration,
            "feature-extraction": UMTaModel,
            "summarization": UMTaForConditionalGeneration,
            "text2text-generation": UMTaForConditionalGeneration,
            "translation": UMTaForConditionalGeneration,
            "question-answering": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMTaModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fpaa_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        """Zeroing a head mask must zero the corresponding generated attentions."""
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)
        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }
        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )
            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        # NOTE(review): method name restored by guess from the skip reason —
        # TODO confirm against the upstream test file.
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Integration check: UMT5-small sentinel-token tokenization and generation.

    Bug fix: every runtime value in the test body was previously the single
    undefined name `lowerCamelCase__`, so the test raised NameError instead of
    running; the checkpoint flags, tokenized inputs and expected tensors are
    restored to concrete values.
    """

    @slow
    @unittest.skip(
        """Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
    def _lowerCAmelCase ( self ):
        model = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""", return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained("""google/umt5-small""", use_fast=False, legacy=False )
        input_text = [
            """Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
            """No se como puedo <extra_id_0>.""",
            """This is the reason why we <extra_id_0> them.""",
            """The <extra_id_0> walks in <extra_id_1>, seats""",
            """A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
        ]
        input_ids = tokenizer(input_text, return_tensors="""pt""", padding=True ).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
                [ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1],
            ] )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS )
        generated_ids = model.generate(input_ids.to(torch_device ) )
        EXPECTED_FILLING = [
            """<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
            """<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
            """<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
            """<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
            """<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
        ]
        filling = tokenizer.batch_decode(generated_ids )
        self.assertEqual(filling, EXPECTED_FILLING )
| 662 |
import re
def __UpperCamelCase ( _lowerCAmelCase ) -> str:
    """Return the complement strand of a DNA sequence (A<->T, C<->G).

    Raises ValueError if the input contains any character other than
    A, T, C or G.

    Bug fix: the body referenced an undefined name `dna` instead of the
    actual parameter, raising NameError on every call.

    >>> __UpperCamelCase("ATCG")
    'TAGC'
    """
    if len(re.findall("""[ATCG]""" , _lowerCAmelCase ) ) != len(_lowerCAmelCase ):
        raise ValueError("""Invalid Strand""" )
    return _lowerCAmelCase.translate(str.maketrans("""ATCG""" , """TAGC""" ) )
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 662 | 1 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def A__ ( dataset_size , input_in_memory_max_size , monkeypatch ):
    """Check is_small_dataset() against datasets.config.IN_MEMORY_MAX_SIZE.

    Bug fix: the signature previously declared three parameters all named
    `_a` (a SyntaxError) while the body referenced `monkeypatch`,
    `dataset_size` and `input_in_memory_max_size`. The parameter names must
    match the @pytest.mark.parametrize ids and the pytest `monkeypatch`
    fixture for pytest to inject them.
    """
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , input_in_memory_max_size )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size )
    assert result == expected
| 448 |
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def A__ ( tf_checkpoint_path , big_bird_config_file , pytorch_dump_path , is_trivia_qa ):
    """Convert a BigBird TensorFlow checkpoint into a PyTorch checkpoint.

    Bug fix: the signature previously declared four parameters all named
    `_a` (a SyntaxError) while the body referenced `is_trivia_qa` and
    `pytorch_dump_path`; parameter names now match the CLI wiring in the
    __main__ block below.
    """
    config = BigBirdConfig.from_json_file(big_bird_config_file )
    print(f"Building PyTorch model from configuration: {config}" )
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config )
    else:
        model = BigBirdForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model , tf_checkpoint_path , is_trivia_qa=is_trivia_qa )
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    model.save_pretrained(pytorch_dump_path )


# The __main__ block below invokes the conversion under its descriptive name.
convert_tf_checkpoint_to_pytorch = A__
if __name__ == "__main__":
    # CLI entry point for the TF -> PyTorch BigBird checkpoint conversion.
    # Bug fix: the parser and the parsed namespace were previously bound to the
    # throwaway name `__lowerCamelCase` while the code below read `parser` and
    # `args`, raising NameError immediately.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--big_bird_config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained BERT model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
| 448 | 1 |
'''simple docstring'''
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
lowercase : Optional[int] = 'scheduler_config.json'
class A ( __snake_case ):
    # Scheduler-type constants (values 1..14).
    # NOTE(review): all fourteen assignments reuse the single name
    # `__magic_name__`, so only the last value (14) survives on the class —
    # presumably each line originally declared a distinct enum member; the
    # base `__snake_case` is also not defined in this chunk (likely Enum).
    # Confirm both against the original module before relying on this class.
    __magic_name__ = 1
    __magic_name__ = 2
    __magic_name__ = 3
    __magic_name__ = 4
    __magic_name__ = 5
    __magic_name__ = 6
    __magic_name__ = 7
    __magic_name__ = 8
    __magic_name__ = 9
    __magic_name__ = 10
    __magic_name__ = 11
    __magic_name__ = 12
    __magic_name__ = 13
    __magic_name__ = 14
@dataclass
class A ( __snake_case ):
    # Single-attribute output container (base `__snake_case` is not defined in
    # this chunk; presumably BaseOutput, imported above).
    # NOTE(review): `42` looks like a scrambled placeholder — dataclass outputs
    # normally declare typed fields; confirm the intended field definition.
    __magic_name__ = 42
class A :
    """Base mixin giving schedulers config-driven load/save behavior plus
    compatible-scheduler discovery.

    Bug fixes:
    - the load and save methods each declared several parameters with the
      same name (`SCREAMING_SNAKE_CASE`), which is a SyntaxError; parameters
      now carry descriptive names and the bodies reference them.
    - the three class attributes were all assigned to `__magic_name__`,
      leaving `_compatibles` (read by the last classmethod) undefined.

    NOTE(review): the four methods below still share the name
    `__lowerCAmelCase`, so later definitions shadow earlier ones — confirm
    the intended public names against the original module.
    """

    # Filename used when (de)serializing the scheduler configuration.
    config_name = SCHEDULER_CONFIG_NAME
    # Names of scheduler classes this one can be swapped with.
    _compatibles = []
    has_compatibles = True

    @classmethod
    def __lowerCAmelCase ( cls , pretrained_model_name_or_path = None , subfolder = None , return_unused_kwargs=False , **kwargs , ) -> Optional[Any]:
        """Instantiate the scheduler from a saved configuration."""
        config , kwargs , commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path , subfolder=subfolder , return_unused_kwargs=True , return_commit_hash=True , **kwargs , )
        return cls.from_config(config , return_unused_kwargs=return_unused_kwargs , **kwargs )

    def __lowerCAmelCase ( self , save_directory , push_to_hub = False , **kwargs ) -> str:
        """Write the scheduler configuration to `save_directory`."""
        self.save_config(save_directory=save_directory , push_to_hub=push_to_hub , **kwargs )

    @property
    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        """Scheduler classes compatible with this scheduler."""
        return self._get_compatibles()

    @classmethod
    def __lowerCAmelCase ( cls ) -> Tuple:
        """Resolve compatible class names to classes on the root package."""
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles ) )
        diffusers_library = importlib.import_module(__name__.split('''.''' )[0] )
        compatible_classes = [
            getattr(diffusers_library , c ) for c in compatible_classes_str if hasattr(diffusers_library , c )
        ]
        return compatible_classes
| 634 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Map of submodule -> exported names, consumed by _LazyModule below.
# Bug fix: this dict and the modeling list were previously bound to the
# throwaway name `lowercase`, so `_import_structure` (referenced at the
# bottom) was never defined and importing the package raised NameError.
# The broken `Tuple`/`int`/`Dict` annotations (undefined names evaluated at
# module level) are dropped as part of the fix.
_import_structure = {
    'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'],
    'tokenization_luke': ['LukeTokenizer'],
}
# Preserve the original module-level binding for backward compatibility.
lowercase = _import_structure

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_luke'] = [
        'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LukeForEntityClassification',
        'LukeForEntityPairClassification',
        'LukeForEntitySpanClassification',
        'LukeForMultipleChoice',
        'LukeForQuestionAnswering',
        'LukeForSequenceClassification',
        'LukeForTokenClassification',
        'LukeForMaskedLM',
        'LukeModel',
        'LukePreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
            LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
            LukeForMaskedLM,
            LukeForMultipleChoice,
            LukeForQuestionAnswering,
            LukeForSequenceClassification,
            LukeForTokenClassification,
            LukeModel,
            LukePreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    lowercase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 634 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Map of submodule -> exported names, consumed by _LazyModule below.
# Bug fix: the two tokenizer lists previously rebound `snake_case_` itself,
# clobbering the dict, and `_import_structure` (referenced at the bottom)
# was never defined, so importing the package raised NameError.
_import_structure = {}
# Preserve the original module-level binding for backward compatibility.
snake_case_ = _import_structure

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""tokenization_nllb"""] = ["""NllbTokenizer"""]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""tokenization_nllb_fast"""] = ["""NllbTokenizerFast"""]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    snake_case_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 718 |
'''simple docstring'''
from __future__ import annotations
class _lowercase :
    """A binary-tree node holding a value and optional left/right children."""

    def __init__( self , _UpperCAmelCase ):
        # Bug fix: the constructor previously assigned the (undefined) name
        # `data` to a throwaway local, so instances never received any
        # attributes and every traversal helper failed with AttributeError.
        self.data = _UpperCAmelCase
        self.left: Node | None = None
        self.right: Node | None = None


# Alias used by the traversal helpers and main() below, which construct
# nodes via the name `Node`.
Node = _lowercase
def _lowerCamelCase( tree: Node | None ) -> None: # In Order traversal of the tree
    """Print the tree's values with an in-order (left, root, right) walk.

    Bug fix: the parameter was named `UpperCamelCase__` while the body read
    `tree`, and the recursive calls targeted the unbound name `display` —
    both raised NameError. The alias below makes the recursion resolve.
    """
    if tree:
        display(tree.left )
        print(tree.data )
        display(tree.right )


# Alias so the recursive calls above and main() below resolve.
display = _lowerCamelCase
def _lowerCamelCase( tree: Node | None ) -> int:
    """Return the height of the tree (0 for an empty tree).

    Bug fix: the parameter was named `UpperCamelCase__` while the body read
    `tree`, and the recursion targeted the unbound name `depth_of_tree` —
    both raised NameError. The alias below makes the recursion resolve.
    """
    return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0


# Alias so the recursive calls above and main() below resolve.
depth_of_tree = _lowerCamelCase
def _lowerCamelCase( tree: Node ) -> bool:
    """Return True if every node in the tree has either zero or two children.

    Bug fix: the parameter was named `UpperCamelCase__` while the body read
    `tree`, and the recursion targeted the unbound name
    `is_full_binary_tree` — both raised NameError. The alias below makes the
    recursion resolve.
    """
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
    else:
        return not tree.left and not tree.right


# Alias so the recursive calls above and main() below resolve.
is_full_binary_tree = _lowerCamelCase
def _lowerCamelCase( ) -> None: # Main function for testing.
    """Build a sample tree and exercise the helpers defined above.

    Bug fix: every node was previously assigned to the same throwaway local
    (so no tree was ever linked together) and the final calls passed the
    undefined name `UpperCamelCase__`; the nine nodes are now linked into an
    actual tree, which is passed to the helpers.
    """
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    tree.left.right.left = Node(6 )
    tree.right.left = Node(7 )
    tree.right.left.left = Node(8 )
    tree.right.left.left.right = Node(9 )
    print(is_full_binary_tree(tree ) )
    print(depth_of_tree(tree ) )
    print('''Tree is: ''' )
    display(tree )


# Alias for the __main__ guard below, which calls `main()`.
main = _lowerCamelCase
if __name__ == "__main__":
    # NOTE(review): `main` is not bound under that name above (the entry
    # point is defined as `_lowerCamelCase`) — confirm the intended callee.
    main()
| 537 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.