code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE: int = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class lowercase_ (lowercase_ , unittest.TestCase ):
lowerCAmelCase__ =PegasusTokenizer
lowerCAmelCase__ =PegasusTokenizerFast
lowerCAmelCase__ =True
lowerCAmelCase__ =True
def __a ( self : Union[str, Any] ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE_ = PegasusTokenizer(UpperCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __a ( self : Tuple ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def __a ( self : Any , **snake_case__ : List[Any] ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __a ( self : Optional[int] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
return ("This is a test", "This is a test")
def __a ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '</s>'
SCREAMING_SNAKE_CASE_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def __a ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '</s>' )
self.assertEqual(vocab_keys[-1] , 'v' )
self.assertEqual(len(UpperCamelCase__ ) , 11_03 )
def __a ( self : Any ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 11_03 )
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
SCREAMING_SNAKE_CASE_ = rust_tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ).input_ids[0]
SCREAMING_SNAKE_CASE_ = py_tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ).input_ids[0]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
SCREAMING_SNAKE_CASE_ = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
SCREAMING_SNAKE_CASE_ = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
SCREAMING_SNAKE_CASE_ = tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ ).input_ids[0]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_61_03
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_03
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 10_24
SCREAMING_SNAKE_CASE_ = 'To ensure a smooth flow of bank resolutions.'
SCREAMING_SNAKE_CASE_ = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
SCREAMING_SNAKE_CASE_ = tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ ).input_ids[0]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def __a ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ['This is going to be way too long.' * 1_50, 'short example']
SCREAMING_SNAKE_CASE_ = ['not super long but more than 5 tokens', 'tiny']
SCREAMING_SNAKE_CASE_ = self._large_tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors='pt' )
SCREAMING_SNAKE_CASE_ = self._large_tokenizer(
text_target=UpperCamelCase__ , max_length=5 , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 10_24)
assert batch.attention_mask.shape == (2, 10_24)
assert targets["input_ids"].shape == (2, 5)
assert len(UpperCamelCase__ ) == 2 # input_ids, attention_mask.
@slow
def __a ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = {'input_ids': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class lowercase_ (lowercase_ , unittest.TestCase ):
lowerCAmelCase__ =PegasusTokenizer
lowerCAmelCase__ =PegasusTokenizerFast
lowerCAmelCase__ =True
lowerCAmelCase__ =True
def __a ( self : Union[str, Any] ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE_ = PegasusTokenizer(UpperCamelCase__ , offset=0 , mask_token_sent=UpperCamelCase__ , mask_token='[MASK]' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __a ( self : Optional[Any] ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
def __a ( self : Optional[int] , **snake_case__ : Dict ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __a ( self : str , snake_case__ : int ):
"""simple docstring"""
return ("This is a test", "This is a test")
def __a ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ = (
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
SCREAMING_SNAKE_CASE_ = rust_tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ).input_ids[0]
SCREAMING_SNAKE_CASE_ = py_tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ).input_ids[0]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
@require_torch
def __a ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ['This is going to be way too long.' * 10_00, 'short example']
SCREAMING_SNAKE_CASE_ = ['not super long but more than 5 tokens', 'tiny']
SCREAMING_SNAKE_CASE_ = self._large_tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors='pt' )
SCREAMING_SNAKE_CASE_ = self._large_tokenizer(
text_target=UpperCamelCase__ , max_length=5 , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 40_96)
assert batch.attention_mask.shape == (2, 40_96)
assert targets["input_ids"].shape == (2, 5)
assert len(UpperCamelCase__ ) == 2 # input_ids, attention_mask.
def __a ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
SCREAMING_SNAKE_CASE_ = self._large_tokenizer(UpperCamelCase__ ).input_ids
self.assertListEqual(
UpperCamelCase__ , [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1] , ) | 360 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCamelCase : Any = logging.get_logger(__name__)
__lowerCamelCase : List[str] = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class UpperCAmelCase ( lowercase_):
"""simple docstring"""
lowerCAmelCase_ = """deformable_detr"""
lowerCAmelCase_ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Optional[Any] , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : List[str]=3 , UpperCamelCase__ : List[Any]=300 , UpperCamelCase__ : Tuple=1024 , UpperCamelCase__ : Optional[int]=6 , UpperCamelCase__ : Union[str, Any]=1024 , UpperCamelCase__ : List[Any]=8 , UpperCamelCase__ : List[Any]=6 , UpperCamelCase__ : Tuple=1024 , UpperCamelCase__ : Optional[int]=8 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[Any]="relu" , UpperCamelCase__ : str=256 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : List[Any]=0.0 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : str=0.02 , UpperCamelCase__ : List[str]=1.0 , UpperCamelCase__ : int=True , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : Tuple="sine" , UpperCamelCase__ : Optional[Any]="resnet50" , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=False , UpperCamelCase__ : Optional[Any]=4 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : List[str]=4 , UpperCamelCase__ : Any=False , UpperCamelCase__ : List[Any]=300 , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : List[Any]=1 , UpperCamelCase__ : Union[str, Any]=5 , UpperCamelCase__ : int=2 , UpperCamelCase__ : Tuple=1 , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : str=5 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Tuple=0.25 , UpperCamelCase__ : Optional[int]=False , **UpperCamelCase__ : Union[str, Any] , ) -> Tuple:
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
_UpperCamelCase =CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
_UpperCamelCase =backbone_config.get('''model_type''' )
_UpperCamelCase =CONFIG_MAPPING[backbone_model_type]
_UpperCamelCase =config_class.from_dict(UpperCamelCase__ )
_UpperCamelCase =use_timm_backbone
_UpperCamelCase =backbone_config
_UpperCamelCase =num_channels
_UpperCamelCase =num_queries
_UpperCamelCase =max_position_embeddings
_UpperCamelCase =d_model
_UpperCamelCase =encoder_ffn_dim
_UpperCamelCase =encoder_layers
_UpperCamelCase =encoder_attention_heads
_UpperCamelCase =decoder_ffn_dim
_UpperCamelCase =decoder_layers
_UpperCamelCase =decoder_attention_heads
_UpperCamelCase =dropout
_UpperCamelCase =attention_dropout
_UpperCamelCase =activation_dropout
_UpperCamelCase =activation_function
_UpperCamelCase =init_std
_UpperCamelCase =init_xavier_std
_UpperCamelCase =encoder_layerdrop
_UpperCamelCase =auxiliary_loss
_UpperCamelCase =position_embedding_type
_UpperCamelCase =backbone
_UpperCamelCase =use_pretrained_backbone
_UpperCamelCase =dilation
# deformable attributes
_UpperCamelCase =num_feature_levels
_UpperCamelCase =encoder_n_points
_UpperCamelCase =decoder_n_points
_UpperCamelCase =two_stage
_UpperCamelCase =two_stage_num_proposals
_UpperCamelCase =with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
_UpperCamelCase =class_cost
_UpperCamelCase =bbox_cost
_UpperCamelCase =giou_cost
# Loss coefficients
_UpperCamelCase =mask_loss_coefficient
_UpperCamelCase =dice_loss_coefficient
_UpperCamelCase =bbox_loss_coefficient
_UpperCamelCase =giou_loss_coefficient
_UpperCamelCase =eos_coefficient
_UpperCamelCase =focal_alpha
_UpperCamelCase =disable_custom_kernels
super().__init__(is_encoder_decoder=UpperCamelCase__ , **UpperCamelCase__ )
@property
def UpperCamelCase__ ( self : Tuple ) -> int:
return self.encoder_attention_heads
@property
def UpperCamelCase__ ( self : Tuple ) -> int:
return self.d_model
def UpperCamelCase__ ( self : Union[str, Any] ) -> List[Any]:
_UpperCamelCase =copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
_UpperCamelCase =self.backbone_config.to_dict()
_UpperCamelCase =self.__class__.model_type
return output
| 404 | 0 |
'''simple docstring'''
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class _A ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCamelCase : Optional[Any] )-> int:
snake_case__ : int = parent
def __lowerCAmelCase ( self : Union[str, Any] )-> Dict:
return {}
def lowerCAmelCase__ ( ):
"""simple docstring"""
snake_case__ : List[Any] = """<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR=\"FFFFFF\">
<HR>
<a href=\"http://google.com\">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style=\"color:#0000FF\">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>"""
snake_case__ : Dict = """
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
"""
return [html_string_a, html_string_a]
@require_bsa
class _A ( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
_lowercase = MarkupLMFeatureExtractor if is_bsa_available() else None
def __lowerCAmelCase ( self : Optional[int] )-> Tuple:
snake_case__ : int = MarkupLMFeatureExtractionTester(self )
@property
def __lowerCAmelCase ( self : Any )-> Tuple:
return self.feature_extract_tester.prepare_feat_extract_dict()
def __lowerCAmelCase ( self : Any )-> List[Any]:
# Initialize feature_extractor
snake_case__ : Dict = self.feature_extraction_class()
# Test not batched input
snake_case__ : Union[str, Any] = get_html_strings()[0]
snake_case__ : Optional[Any] = feature_extractor(lowerCamelCase )
# fmt: off
snake_case__ : Optional[int] = [["""sample document""", """Goog""", """This is one header""", """This is a another Header""", """Travel from""", """SFO to JFK""", """on May 2, 2015 at 2:00 pm. For details go to confirm.com""", """Traveler""", """name""", """is""", """John Doe"""]]
snake_case__ : Dict = [["""/html/head/title""", """/html/body/a""", """/html/body/h1""", """/html/body/h2""", """/html/body/p""", """/html/body/p/p/b[1]""", """/html/body/p/p/b[2]/i""", """/html/body/p/p/div/h3""", """/html/body/p/p/div/h3/b""", """/html/body/p/p/div/h3""", """/html/body/p/p/div/h3/p"""]]
# fmt: on
self.assertEqual(encoding.nodes , lowerCamelCase )
self.assertEqual(encoding.xpaths , lowerCamelCase )
# Test batched
snake_case__ : List[Any] = get_html_strings()
snake_case__ : Optional[int] = feature_extractor(lowerCamelCase )
# fmt: off
snake_case__ : int = expected_nodes + [["""My First Heading""", """My first paragraph."""]]
snake_case__ : Tuple = expected_xpaths + [["""/html/body/h1""", """/html/body/p"""]]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , lowerCamelCase )
self.assertEqual(encoding.xpaths , lowerCamelCase )
| 172 |
'''simple docstring'''
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
return abs(UpperCAmelCase ) if a == 0 else greatest_common_divisor(b % a , UpperCAmelCase )
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
while y: # --> when y=0 then loop will terminate and return x as final GCD.
snake_case__ , snake_case__ : Tuple = y, x % y
return abs(UpperCAmelCase )
def lowerCAmelCase__ ( ):
"""simple docstring"""
try:
snake_case__ : Optional[Any] = input("""Enter two integers separated by comma (,): """ ).split(""",""" )
snake_case__ : Optional[int] = int(nums[0] )
snake_case__ : Optional[int] = int(nums[1] )
print(
f"""greatest_common_divisor({num_a}, {num_a}) = """
f"""{greatest_common_divisor(UpperCAmelCase , UpperCAmelCase )}""" )
print(f"""By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(UpperCAmelCase , UpperCAmelCase )}""" )
except (IndexError, UnboundLocalError, ValueError):
print("""Wrong input""" )
if __name__ == "__main__":
main()
| 172 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
UpperCamelCase__ = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'''
def a__ ( ) -> List[str]:
UpperCAmelCase__ : Optional[int] = _ask_options(
'''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
UpperCAmelCase__ : Any = get_sagemaker_input()
else:
UpperCAmelCase__ : List[str] = get_cluster_input()
return config
def a__ ( lowerCAmelCase__=None ) -> List[Any]:
if subparsers is not None:
UpperCAmelCase__ : Union[str, Any] = subparsers.add_parser('''config''' , description=lowerCAmelCase__ )
else:
UpperCAmelCase__ : Dict = argparse.ArgumentParser('''Accelerate config command''' , description=lowerCAmelCase__ )
parser.add_argument(
'''--config_file''' , default=lowerCAmelCase__ , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase__ )
return parser
def a__ ( lowerCAmelCase__ ) -> List[Any]:
UpperCAmelCase__ : List[Any] = get_user_input()
if args.config_file is not None:
UpperCAmelCase__ : Any = args.config_file
else:
if not os.path.isdir(lowerCAmelCase__ ):
os.makedirs(lowerCAmelCase__ )
UpperCAmelCase__ : int = default_yaml_config_file
if config_file.endswith('''.json''' ):
config.to_json_file(lowerCAmelCase__ )
else:
config.to_yaml_file(lowerCAmelCase__ )
print(F"""accelerate configuration saved at {config_file}""" )
def a__ ( ) -> str:
UpperCAmelCase__ : Optional[int] = config_command_parser()
UpperCAmelCase__ : Any = parser.parse_args()
config_command(lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 75 | import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase=() , _UpperCAmelCase=None , _UpperCAmelCase="no" , _UpperCAmelCase="29500" ):
lowerCamelCase_: List[str] = False
lowerCamelCase_: Dict = False
if any(key.startswith("""KAGGLE""" ) for key in os.environ.keys() ):
lowerCamelCase_: Dict = True
elif "IPython" in sys.modules:
lowerCamelCase_: Dict = """google.colab""" in str(sys.modules["""IPython"""].get_ipython() )
try:
lowerCamelCase_: str = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
f"""Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" )
if (in_colab or in_kaggle) and (os.environ.get("""TPU_NAME""" , _UpperCAmelCase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside """
"""your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if num_processes is None:
lowerCamelCase_: Optional[Any] = 8
lowerCamelCase_: List[Any] = PrepareForLaunch(_UpperCAmelCase , distributed_type="""TPU""" )
print(f"""Launching a training on {num_processes} TPU cores.""" )
xmp.spawn(_UpperCAmelCase , args=_UpperCAmelCase , nprocs=_UpperCAmelCase , start_method="""fork""" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on one CPU.""" )
function(*_UpperCAmelCase )
else:
if num_processes is None:
raise ValueError(
"""You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.""" )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized """
"""inside your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if torch.cuda.is_initialized():
raise ValueError(
"""To launch a multi-GPU training from your notebook, you need to avoid running any instruction """
"""using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA """
"""function.""" )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=_UpperCAmelCase , master_addr="""127.0.01""" , master_port=_UpperCAmelCase , mixed_precision=_UpperCAmelCase ):
lowerCamelCase_: Tuple = PrepareForLaunch(_UpperCAmelCase , distributed_type="""MULTI_GPU""" )
print(f"""Launching training on {num_processes} GPUs.""" )
try:
start_processes(_UpperCAmelCase , args=_UpperCAmelCase , nprocs=_UpperCAmelCase , start_method="""fork""" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"""CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. """
"""This likely stems from an outside import causing issues once the `notebook_launcher()` is called. """
"""Please review your imports and test them when running the `notebook_launcher()` to identify """
"""which one is problematic.""" ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
lowerCamelCase_: List[Any] = """1"""
print("""Launching training on MPS.""" )
elif torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on CPU.""" )
function(*_UpperCAmelCase )
def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase=() , _UpperCAmelCase=2 ):
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=_UpperCAmelCase , master_addr="""127.0.01""" , master_port="""29500""" , accelerate_mixed_precision="""no""" , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu="""yes""" , ):
lowerCamelCase_: Optional[Any] = PrepareForLaunch(_UpperCAmelCase , debug=_UpperCAmelCase )
start_processes(_UpperCAmelCase , args=_UpperCAmelCase , nprocs=_UpperCAmelCase , start_method="""fork""" )
| 423 | 0 |
import collections
import importlib.util
import os
import re
from pathlib import Path
UpperCAmelCase_ = 'src/transformers'
# Matches is_xxx_available()
UpperCAmelCase_ = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
UpperCAmelCase_ = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
UpperCAmelCase_ = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
UpperCAmelCase_ = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
UpperCAmelCase_ = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
UpperCAmelCase_ = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
UpperCAmelCase_ = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
UpperCAmelCase_ = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
UpperCAmelCase_ = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
UpperCAmelCase_ = re.compile(R"""^\s*try:""")
# Catches a line with else:
UpperCAmelCase_ = re.compile(R"""^\s*else:""")
def lowerCamelCase__ ( UpperCamelCase__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
if _re_test_backend.search(lowerCamelCase_ ) is None:
return None
_snake_case = [b[0] for b in _re_backend.findall(lowerCamelCase_ )]
backends.sort()
return "_and_".join(lowerCamelCase_ )
def lowerCamelCase__ ( UpperCamelCase__ : Any ) -> List[Any]:
'''simple docstring'''
with open(lowerCamelCase_ , 'r' , encoding='utf-8' , newline='\n' ) as f:
_snake_case = f.readlines()
_snake_case = 0
while line_index < len(lowerCamelCase_ ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(lowerCamelCase_ ):
return None
# First grab the objects without a specific backend in _import_structure
_snake_case = []
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
_snake_case = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(lowerCamelCase_ ):
_snake_case = _re_one_line_import_struct.search(lowerCamelCase_ ).groups()[0]
_snake_case = re.findall('\[([^\]]+)\]' , lowerCamelCase_ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
_snake_case = _re_import_struct_key_value.search(lowerCamelCase_ )
if single_line_import_search is not None:
_snake_case = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(lowerCamelCase_ ) > 0]
objects.extend(lowerCamelCase_ )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
_snake_case = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
_snake_case = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_snake_case = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_snake_case = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
_snake_case = lines[line_index]
if _re_import_struct_add_one.search(lowerCamelCase_ ) is not None:
objects.append(_re_import_struct_add_one.search(lowerCamelCase_ ).groups()[0] )
elif _re_import_struct_add_many.search(lowerCamelCase_ ) is not None:
_snake_case = _re_import_struct_add_many.search(lowerCamelCase_ ).groups()[0].split(', ' )
_snake_case = [obj[1:-1] for obj in imports if len(lowerCamelCase_ ) > 0]
objects.extend(lowerCamelCase_ )
elif _re_between_brackets.search(lowerCamelCase_ ) is not None:
_snake_case = _re_between_brackets.search(lowerCamelCase_ ).groups()[0].split(', ' )
_snake_case = [obj[1:-1] for obj in imports if len(lowerCamelCase_ ) > 0]
objects.extend(lowerCamelCase_ )
elif _re_quote_object.search(lowerCamelCase_ ) is not None:
objects.append(_re_quote_object.search(lowerCamelCase_ ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '"' ):
objects.append(line[13:-3] )
line_index += 1
_snake_case = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_snake_case = []
while (
line_index < len(lowerCamelCase_ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
_snake_case = lines[line_index]
_snake_case = _re_import.search(lowerCamelCase_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
_snake_case = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(lowerCamelCase_ ):
# If the line is an if is_backend_available, we grab all objects associated.
_snake_case = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_snake_case = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_snake_case = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
_snake_case = lines[line_index]
_snake_case = _re_import.search(lowerCamelCase_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
_snake_case = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def lowerCamelCase__(import_dict_objects, type_hint_objects) -> list:
    """Compare the two halves of an ``__init__.py``.

    ``import_dict_objects`` maps backend name -> objects declared in
    ``_import_structure``; ``type_hint_objects`` maps backend name ->
    objects imported under ``TYPE_CHECKING``.

    Returns a list of human-readable error strings (empty when both
    halves agree).

    Fixes over the original block: the two parameters were both named
    ``UpperCamelCase__`` (a SyntaxError) while the body read
    ``import_dict_objects`` / ``type_hint_objects``, and intermediate
    results were bound to a throwaway name while later lines read
    ``duplicate_imports`` / ``duplicate_type_hints`` / ``errors``.
    """

    def find_duplicates(seq):
        # Objects listed more than once for the same backend.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    # Both halves must declare exactly the same backends, in the same order.
    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        # Compare as sets so pure ordering differences are not reported.
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def lowerCamelCase__ ( ) -> None:
    """Walk the repo tree and raise ``ValueError`` listing every ``__init__.py``
    whose ``_import_structure`` half and ``TYPE_CHECKING`` half disagree."""
    # NOTE(review): identifiers in this block look machine-mangled — every
    # assignment targets `_snake_case` while later lines read `objects`,
    # `errors`, `fname` and `failures`, and `lowerCamelCase_` is never defined
    # here (upstream this is the transformers package root path).  Verify
    # against the original check-init script before relying on this.
    _snake_case = []
    for root, _, files in os.walk(lowerCamelCase_ ):
        if "__init__.py" in files:
            # Parse the init into its (import_structure, type_hint) halves.
            _snake_case = os.path.join(lowerCamelCase_ , '__init__.py' )
            _snake_case = parse_init(lowerCamelCase_ )
            if objects is not None:
                _snake_case = analyze_results(*lowerCamelCase_ )
                if len(lowerCamelCase_ ) > 0:
                    _snake_case = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append('\n'.join(lowerCamelCase_ ) )
    # One ValueError aggregating every failing init.
    if len(lowerCamelCase_ ) > 0:
        raise ValueError('\n\n'.join(lowerCamelCase_ ) )
def lowerCamelCase__ ( ) -> List[Any]:
    """Return the dotted names of all direct submodules of the package:
    one level of (non-private, non-empty) directories plus top-level ``*.py``
    files other than ``__init__.py``."""
    # NOTE(review): `lowerCamelCase_` is read but never defined here (upstream
    # this walks the transformers source root), and assignments bind the
    # throwaway `_snake_case` while later lines read `short_path`, `submodule`
    # and `submodules` — identifiers look machine-mangled; verify upstream.
    _snake_case = []
    for path, directories, files in os.walk(lowerCamelCase_ ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('_' ):
                directories.remove(lowerCamelCase_ )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(lowerCamelCase_ ) / folder).glob('*.py' ) ) ) == 0:
                continue
            # Convert the relative path into a dotted module name.
            _snake_case = str((Path(lowerCamelCase_ ) / folder).relative_to(lowerCamelCase_ ) )
            _snake_case = short_path.replace(os.path.sep , '.' )
            submodules.append(lowerCamelCase_ )
        for fname in files:
            if fname == "__init__.py":
                continue
            _snake_case = str((Path(lowerCamelCase_ ) / fname).relative_to(lowerCamelCase_ ) )
            _snake_case = short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
            # Only keep top-level files (name contains no dots once '.py' is gone).
            if len(submodule.split('.' ) ) == 1:
                submodules.append(lowerCamelCase_ )
    return submodules
# Submodules intentionally absent from the main `_import_structure`
# (conversion/bridge scripts, not part of the public API).
UpperCAmelCase_ = [
    'convert_pytorch_checkpoint_to_tf2',
    'modeling_flax_pytorch_utils',
]
def lowerCamelCase__ ( ) -> None:
    """Raise ``ValueError`` if a submodule on disk is missing from the keys of
    the package's main ``_import_structure``."""
    # NOTE(review): `lowerCamelCase_` and `IGNORE_SUBMODULES` are read but not
    # defined under those names here (the ignore list above is bound to
    # `UpperCAmelCase_`), and assignments bind `_snake_case` while later lines
    # read `spec`, `module_not_registered` and `list_of_modules` — identifiers
    # look machine-mangled; verify against the original script.
    _snake_case = importlib.util.spec_from_file_location(
        'transformers' , os.path.join(lowerCamelCase_ , '__init__.py' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
    _snake_case = spec.loader.load_module()
    # Submodules neither explicitly ignored nor registered in the init.
    _snake_case = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(lowerCamelCase_ ) > 0:
        _snake_case = '''\n'''.join(F'''- {module}''' for module in module_not_registered )
        raise ValueError(
            'The following submodules are not properly registered in the main init of Transformers:\n'
            F'''{list_of_modules}\n'''
            'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
    # Run both repo-consistency checks when invoked as a script.
    # NOTE(review): neither name is defined above (all check helpers were
    # mangled to `lowerCamelCase__`) — verify against the original script.
    check_all_inits()
    check_submodules()
| 702 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Map: submodule name -> list of public names it exports.  Extended per
# available backend below and handed to _LazyModule so heavy imports only
# happen on first attribute access.
# (Fix: the original bound the dict and both backend lists to one name,
# clobbering each other, and referenced an undefined `_import_structure`.)
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # PyTorch missing: skip registering the torch models.
    pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # TensorFlow missing: skip registering the TF models.
    pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy; attributes resolve on demand.
    # (Fix: the original imported `sys` but never installed the proxy.)
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 541 | 0 |
def A__(a: int, b: int) -> int:
    """Return gcd(a, b) via the iterative Euclidean algorithm.

    Fixes over the original block: both parameters were named
    ``lowerCamelCase`` (a SyntaxError) and the loop body assigned the
    ``(b, a % b)`` tuple to a throwaway local, so ``a``/``b`` never changed.
    """
    while b:
        a, b = b, a % b
    return a
def A__(a: int, b: int) -> int:
    """Return gcd(a, b) via the recursive Euclidean algorithm.

    Fixes over the original block: both parameters were named
    ``lowerCamelCase`` (a SyntaxError) and the recursive call targeted an
    undefined name with an undefined first argument; it now recurses on
    itself with ``(b, a % b)``.
    """
    return a if b == 0 else A__(b, a % b)
def A__ ( ) -> None:
    """Demo driver: print gcd results for a few sample pairs."""
    # NOTE(review): `euclidean_gcd` / `euclidean_gcd_recursive` are not
    # defined in this file (both helpers above are named `A__`) — the
    # identifiers look machine-mangled; verify against the original module.
    print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
    print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
    print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
    print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
    print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
    print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
    print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
    print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
    print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
    print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )


if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this file (the driver above is
    # named `A__`) — verify against the original module.
    main()
| 548 |
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
A__ : Any = logging.get_logger(__name__)

# TODO Update this
# Checkpoint name -> remote config URL for released ESM models.
# NOTE(review): both constants bind the same mangled name `A__`, so the dict
# clobbers the logger — upstream these are distinct names; verify.
A__ : int = {
    'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class lowercase__ ( snake_case__ ):
    """Configuration for ESM models (and, when ``is_folding_model`` is set,
    for the ESMFold head)."""
    # NOTE(review): this block looks machine-mangled — the base class name
    # `snake_case__` is undefined at module level (upstream: PretrainedConfig),
    # every `__init__` parameter shares the name `snake_case__` (a
    # SyntaxError), and assignments bind throwaway `lowerCamelCase_ :` locals
    # instead of `self.*` attributes.  The classes it references
    # (`EsmFoldConfig`, `get_default_vocab_list`) are defined below under
    # mangled names.  Verify against the upstream EsmConfig before relying
    # on any of this.

    # Model family identifier used by the auto classes.
    _UpperCAmelCase :str = "esm"

    def __init__( self : Optional[int] , snake_case__ : Tuple=None , snake_case__ : List[Any]=None , snake_case__ : str=None , snake_case__ : Union[str, Any]=768 , snake_case__ : Tuple=12 , snake_case__ : List[Any]=12 , snake_case__ : int=3072 , snake_case__ : Dict=0.1 , snake_case__ : Tuple=0.1 , snake_case__ : List[str]=1026 , snake_case__ : Dict=0.02 , snake_case__ : Union[str, Any]=1E-12 , snake_case__ : Optional[int]="absolute" , snake_case__ : Any=True , snake_case__ : int=None , snake_case__ : Dict=False , snake_case__ : Tuple=False , snake_case__ : Optional[Any]=None , snake_case__ : Optional[Any]=None , **snake_case__ : List[Any] , ):
        super().__init__(pad_token_id=snake_case__ , mask_token_id=snake_case__ , **snake_case__ )
        # Standard transformer hyper-parameters.
        lowerCamelCase_ : Tuple =vocab_size
        lowerCamelCase_ : int =hidden_size
        lowerCamelCase_ : Dict =num_hidden_layers
        lowerCamelCase_ : Dict =num_attention_heads
        lowerCamelCase_ : str =intermediate_size
        lowerCamelCase_ : Dict =hidden_dropout_prob
        lowerCamelCase_ : int =attention_probs_dropout_prob
        lowerCamelCase_ : int =max_position_embeddings
        lowerCamelCase_ : Tuple =initializer_range
        lowerCamelCase_ : Any =layer_norm_eps
        lowerCamelCase_ : Tuple =position_embedding_type
        lowerCamelCase_ : Optional[Any] =use_cache
        # ESM-specific options.
        lowerCamelCase_ : List[Any] =emb_layer_norm_before
        lowerCamelCase_ : Union[str, Any] =token_dropout
        lowerCamelCase_ : int =is_folding_model
        if is_folding_model:
            # Folding models additionally need an ESMFold config and a vocab.
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values." )
                lowerCamelCase_ : int =EsmFoldConfig()
            elif isinstance(snake_case__ , snake_case__ ):
                lowerCamelCase_ : int =EsmFoldConfig(**snake_case__ )
            lowerCamelCase_ : Optional[int] =esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
                lowerCamelCase_ : List[Any] =get_default_vocab_list()
            else:
                lowerCamelCase_ : Optional[int] =vocab_list
        else:
            lowerCamelCase_ : str =None
            lowerCamelCase_ : Any =None
        # The HF port does not support the use_esm_attn_map option.
        if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , snake_case__ ):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )

    def UpperCAmelCase__ ( self : int ):
        """Serialize to a dict, expanding the nested ESMFold config."""
        lowerCamelCase_ : List[Any] =super().to_dict()
        if isinstance(self.esmfold_config , snake_case__ ):
            lowerCamelCase_ : Optional[int] =self.esmfold_config.to_dict()
        return output
@dataclass
class lowercase__ :
    """Configuration of the ESMFold head."""
    # NOTE(review): every field below is named `_UpperCAmelCase`, so later
    # declarations clobber earlier ones and only the last field survives in
    # the generated dataclass; both methods are likewise named
    # `UpperCAmelCase__` (upstream: `__post_init__` and `to_dict`) and the
    # first one references an undefined `TrunkConfig` (defined below under a
    # mangled name).  Identifiers look machine-mangled; verify upstream.
    _UpperCAmelCase :str = None
    _UpperCAmelCase :bool = True
    _UpperCAmelCase :bool = False
    _UpperCAmelCase :bool = False
    _UpperCAmelCase :bool = False
    _UpperCAmelCase :float = 0
    _UpperCAmelCase :bool = True
    _UpperCAmelCase :bool = False
    _UpperCAmelCase :int = 128
    _UpperCAmelCase :"TrunkConfig" = None

    def UpperCAmelCase__ ( self : Dict ):
        # Post-init hook: materialize the nested trunk config from a dict.
        if self.trunk is None:
            lowerCamelCase_ : List[Any] =TrunkConfig()
        elif isinstance(self.trunk , snake_case__ ):
            lowerCamelCase_ : str =TrunkConfig(**self.trunk )

    def UpperCAmelCase__ ( self : str ):
        # Serialize to a dict, expanding the nested trunk config.
        lowerCamelCase_ : List[Any] =asdict(self )
        lowerCamelCase_ : Tuple =self.trunk.to_dict()
        return output
@dataclass
class lowercase__ :
    """Configuration of the ESMFold trunk (folding blocks)."""
    # NOTE(review): all fields share the mangled name `_UpperCAmelCase`
    # (later declarations clobber earlier ones) and both methods share the
    # name `UpperCAmelCase__` — upstream these are distinct fields plus
    # `__post_init__` / `to_dict`.  Also note below: `x % x != 0` is never
    # true for a nonzero number, so the two "round multiple" checks can
    # never fire, and their messages print the same value twice — this
    # mirrors an upstream oddity; presumably a head-width divisor was meant.
    _UpperCAmelCase :int = 48
    _UpperCAmelCase :int = 1024
    _UpperCAmelCase :int = 128
    _UpperCAmelCase :int = 32
    _UpperCAmelCase :int = 32
    _UpperCAmelCase :int = 32
    _UpperCAmelCase :float = 0
    _UpperCAmelCase :float = 0
    _UpperCAmelCase :bool = False
    _UpperCAmelCase :int = 4
    _UpperCAmelCase :Optional[int] = 128
    _UpperCAmelCase :"StructureModuleConfig" = None

    def UpperCAmelCase__ ( self : Dict ):
        # Post-init hook: materialize nested config and validate dimensions.
        if self.structure_module is None:
            lowerCamelCase_ : Union[str, Any] =StructureModuleConfig()
        elif isinstance(self.structure_module , snake_case__ ):
            lowerCamelCase_ : Optional[int] =StructureModuleConfig(**self.structure_module )
        if self.max_recycles <= 0:
            raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""" )
        # Dead check: a value modulo itself is always 0 (see class NOTE).
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
                F""" {self.sequence_state_dim} and {self.sequence_state_dim}.""" )
        # Dead check: same pattern as above.
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
                F""" {self.pairwise_state_dim} and {self.pairwise_state_dim}.""" )
        # State dims must factor exactly into heads * head_width.
        lowerCamelCase_ : str =self.sequence_state_dim // self.sequence_head_width
        lowerCamelCase_ : Optional[Any] =self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
        if self.dropout >= 0.4:
            raise ValueError(F"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )

    def UpperCAmelCase__ ( self : List[str] ):
        # Serialize to a dict, expanding the nested structure-module config.
        lowerCamelCase_ : Tuple =asdict(self )
        lowerCamelCase_ : Any =self.structure_module.to_dict()
        return output
@dataclass
class lowercase__ :
    """Configuration of the ESMFold structure module."""
    # NOTE(review): all fields share the mangled name `_UpperCAmelCase`, so
    # only the last declaration survives in the generated dataclass —
    # upstream these are distinct fields (sequence_dim, pairwise_dim, ...).
    _UpperCAmelCase :int = 384
    _UpperCAmelCase :int = 128
    _UpperCAmelCase :int = 16
    _UpperCAmelCase :int = 128
    _UpperCAmelCase :int = 12
    _UpperCAmelCase :int = 4
    _UpperCAmelCase :int = 8
    _UpperCAmelCase :float = 0.1
    _UpperCAmelCase :int = 8
    _UpperCAmelCase :int = 1
    _UpperCAmelCase :int = 2
    _UpperCAmelCase :int = 7
    _UpperCAmelCase :int = 10
    _UpperCAmelCase :float = 1e-8
    _UpperCAmelCase :float = 1e5

    def UpperCAmelCase__ ( self : Optional[Any] ):
        # Serialize all dataclass fields to a plain dict.
        return asdict(self )
def _snake_case ( ) -> Optional[Any]:
    """Return the default ESM-2 vocabulary as a tuple of token strings.

    Order matters: four leading special tokens, then the 25 residue symbols,
    then the trailing special tokens (gap/null/mask).
    """
    leading_specials = ("<cls>", "<pad>", "<eos>", "<unk>")
    # One single-character token per residue symbol, in canonical ESM order.
    residue_tokens = tuple("LAGVSERTIDPKQNFYMHWCXBUZO")
    trailing_specials = (".", "-", "<null_1>", "<mask>")
    return leading_specials + residue_tokens + trailing_specials
| 153 | 0 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
# Module logger, followed by type aliases for the pipeline's return value.
# NOTE(review): all three assignments bind the same mangled name
# `lowerCAmelCase__`, clobbering each other, and the third reads the
# undefined `Prediction` — upstream these are `logger`, `Prediction` and
# `Predictions`; verify against the original pipeline module.
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = Dict[str, Any]
lowerCAmelCase__ = List[Prediction]
@add_end_docstrings(lowerCamelCase_ )
class _a ( lowerCamelCase_ ):
    """Object-detection pipeline: image in, list of {score, label, box} dicts out.

    Handles both regular object-detection models and LayoutLM-style token
    classification over OCR'd words (when a tokenizer is present).
    """
    # NOTE(review): this block looks machine-mangled — the decorator argument
    # and base class `lowerCamelCase_` are undefined at module level
    # (upstream: PIPELINE_INIT_ARGS and Pipeline), the four `__lowerCAmelCase`
    # methods all name-mangle to the same attribute and clobber each other
    # (upstream: _sanitize_parameters / preprocess / _forward / postprocess /
    # _get_bounding_box), and many locals bind `_lowercase` while later lines
    # read other names.  Verify against the upstream ObjectDetectionPipeline.

    def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ):
        super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
        # Post-processing below is torch-only, so TF checkpoints are rejected.
        if self.framework == "tf":
            raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
        requires_backends(self , "vision" )
        # NOTE(review): `dict_items + dict_items` raises TypeError on plain
        # dicts — confirm these mappings support `+` or how upstream merges.
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )

    def __lowerCAmelCase ( self , **lowerCAmelCase_ ):
        # Split call-time kwargs into (preprocess, forward, postprocess) param
        # dicts.  NOTE(review): `postprocess_kwargs` is returned but never
        # assigned here (assignments bind `_lowercase`) — mangled; verify.
        _lowercase ={}
        if "threshold" in kwargs:
            _lowercase =kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ):
        # Public entry point; the base pipeline drives the stages below.
        return super().__call__(*lowerCAmelCase_ , **lowerCAmelCase_ )

    def __lowerCAmelCase ( self , lowerCAmelCase_ ):
        # Preprocess: load the image, record its size, run the feature
        # extractor (and tokenizer for LayoutLM-style models).
        _lowercase =load_image(lowerCAmelCase_ )
        _lowercase =torch.IntTensor([[image.height, image.width]] )
        _lowercase =self.image_processor(images=[image] , return_tensors="pt" )
        if self.tokenizer is not None:
            _lowercase =self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" )
        _lowercase =target_size
        return inputs

    def __lowerCAmelCase ( self , lowerCAmelCase_ ):
        # Forward: run the model, carrying the original image size through.
        _lowercase =model_inputs.pop("target_size" )
        _lowercase =self.model(**lowerCAmelCase_ )
        _lowercase =outputs.__class__({"target_size": target_size, **outputs} )
        if self.tokenizer is not None:
            _lowercase =model_inputs["bbox"]
        return model_outputs

    def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=0.9 ):
        # Postprocess: convert logits/boxes into a list of annotation dicts,
        # keeping predictions whose score exceeds the threshold (default 0.9).
        _lowercase =model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            _lowercase , _lowercase =target_size[0].tolist()

            def unnormalize(lowerCAmelCase_ ):
                # Boxes come back normalized to 0..1000; rescale to pixels.
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ] ) )

            _lowercase , _lowercase =model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            _lowercase =[self.model.config.idalabel[prediction] for prediction in classes.tolist()]
            _lowercase =[unnormalize(lowerCAmelCase_ ) for bbox in model_outputs["bbox"].squeeze(0 )]
            _lowercase =["score", "label", "box"]
            _lowercase =[dict(zip(lowerCAmelCase_ , lowerCAmelCase_ ) ) for vals in zip(scores.tolist() , lowerCAmelCase_ , lowerCAmelCase_ ) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            _lowercase =self.image_processor.post_process_object_detection(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
            _lowercase =raw_annotations[0]
            _lowercase =raw_annotation["scores"]
            _lowercase =raw_annotation["labels"]
            _lowercase =raw_annotation["boxes"]
            _lowercase =scores.tolist()
            _lowercase =[self.model.config.idalabel[label.item()] for label in labels]
            _lowercase =[self._get_bounding_box(lowerCAmelCase_ ) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            _lowercase =["score", "label", "box"]
            _lowercase =[
                dict(zip(lowerCAmelCase_ , lowerCAmelCase_ ) )
                for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
            ]
        return annotation

    def __lowerCAmelCase ( self , lowerCAmelCase_ ):
        # Convert an (xmin, ymin, xmax, ymax) tensor into a plain dict.
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
        _lowercase , _lowercase , _lowercase , _lowercase =box.int().tolist()
        _lowercase ={
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
| 594 | from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class _a ( Protocol ):
    """Structural type for an audio filter.

    Anything with this per-sample method (a float sample in, a float out)
    satisfies the protocol; the default implementation emits silence.

    Fix over the original block: the base class was the undefined name
    ``lowerCamelCase_`` (a NameError at class-creation time); ``Protocol``
    is imported above for exactly this purpose.
    """

    def __lowerCAmelCase ( self , lowerCAmelCase_ ):
        # Default no-op filter: every sample maps to 0.0.
        return 0.0
def __lowerCamelCase(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    """Return ``(lowest, highest)`` dB bounds over the positive-frequency bins.

    The low bound is clamped to at most -20 and the high bound to at least 20
    so the later plot always shows a reasonable span.

    Fix over the original block: both parameters were named ``__a`` (a
    SyntaxError) while the body read ``fft_results`` and ``samplerate``.
    """
    # Bins 1 .. samplerate//2 - 2: skip DC and the negative-frequency half.
    band = fft_results[1 : samplerate // 2 - 1]
    lowest = min([-20, np.min(band)])
    highest = max([20, np.max(band)])
    return lowest, highest
def __lowerCamelCase(filter_type: FilterType, samplerate: int) -> None:
    """Plot the frequency response (gain in dB) of ``filter_type``.

    Feeds a unit impulse through the filter, zero-pads the response to
    ``samplerate`` samples, FFTs it, and plots 24 Hz .. Nyquist on a log axis.

    Fixes over the original block: both parameters were named ``__a`` (a
    SyntaxError), locals were bound to a throwaway name, and ``np.logaa``
    is not a NumPy function (the dB conversion needs ``np.log10``).
    """
    size = 512
    # Unit impulse: the filter's output for it is its impulse response.
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    # Gain in dB of the FFT of the impulse response.
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds.
    # NOTE(review): `get_bounds` is the upstream helper name; in this file it
    # was mangled to `__lowerCamelCase` — confirm against the original module.
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()
def __lowerCamelCase(filter_type: FilterType, samplerate: int) -> None:
    """Plot the phase response (radians) of ``filter_type``.

    Feeds a unit impulse through the filter, zero-pads the response to
    ``samplerate`` samples, FFTs it, and plots the unwrapped phase from
    24 Hz to Nyquist on a log frequency axis.

    Fixes over the original block: both parameters were named ``__a`` (a
    SyntaxError) and locals were bound to a throwaway name while later
    lines read them.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_phase = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    # Original passes -2*pi as np.unwrap's second (discont) argument; kept.
    plt.plot(np.unwrap(fft_phase, -2 * pi))
    plt.show()
| 594 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger, then checkpoint name -> remote config URL for Falcon models.
# NOTE(review): both constants bind the same mangled name `__lowerCamelCase`
# (the dict clobbers the logger) — upstream these are `logger` and
# `FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP`; verify.
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : int = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class a ( PretrainedConfig ):
    """Configuration for Falcon models.

    Stores the hyper-parameters of a Falcon decoder-only architecture;
    the defaults correspond to the original block's default values.

    Fixes over the original block: the base class was the undefined name
    ``UpperCamelCase_`` (``PretrainedConfig`` is imported above for this),
    every ``__init__`` parameter shared one name (a SyntaxError), all
    attribute assignments bound throwaway ``A__`` locals (so the properties
    below read attributes that were never set), and both class attributes
    and both properties shared a single name, clobbering each other.
    """

    model_type = "falcon"
    # Cache entries that generation utilities should skip when slicing outputs.
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=6_50_24,
        hidden_size=45_44,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        # Default: one key/value head per attention head.
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        """Per-head dimension of the attention projections."""
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        """True when rotary position embeddings are used (i.e. no ALiBi)."""
        return not self.alibi
| 416 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Batch-size constants.
# NOTE(review): both bind the mangled name `__lowerCamelCase` (the second
# clobbers the first); the code below reads `MAX_GPU_BATCH_SIZE` and
# `EVAL_BATCH_SIZE`, which these presumably were upstream — verify.
__lowerCamelCase : int = 16
__lowerCamelCase : Tuple = 32
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ = 1_6 ) -> int:
    """Build (train, validation, test) DataLoaders for one MRPC k-fold split.

    Upstream signature: (accelerator, dataset, train_idxs, valid_idxs,
    batch_size=16)."""
    # NOTE(review): this block looks machine-mangled — all five parameters
    # share the name `snake_case_` (a SyntaxError), and the body reads
    # `accelerator`, `dataset`, `train_idxs`, `valid_idxs`, `datasets`,
    # `tokenized_datasets` and `batch_size` although only throwaway `A__`
    # locals are assigned.  Verify against the original accelerate example.
    A__ : Tuple =AutoTokenizer.from_pretrained('''bert-base-cased''' )
    # Re-split the original train set into this fold's train/validation parts.
    A__ : Union[str, Any] =DatasetDict(
        {
            '''train''': dataset['''train'''].select(snake_case_ ),
            '''validation''': dataset['''train'''].select(snake_case_ ),
            '''test''': dataset['''validation'''],
        } )

    def tokenize_function(snake_case_ ):
        # max_length=None => use the model max length (it's actually the default)
        A__ : int =tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=snake_case_, max_length=snake_case_ )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        A__ : Optional[Any] =datasets.map(
            snake_case_, batched=snake_case_, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    A__ : Tuple =tokenized_datasets.rename_column('''label''', '''labels''' )

    def collate_fn(snake_case_ ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        A__ : Optional[int] =1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            A__ : Tuple =1_6
        elif accelerator.mixed_precision != "no":
            A__ : List[Any] =8
        else:
            A__ : Any =None
        return tokenizer.pad(
            snake_case_, padding='''longest''', max_length=snake_case_, pad_to_multiple_of=snake_case_, return_tensors='''pt''', )

    # Instantiate dataloaders.
    A__ : Optional[int] =DataLoader(
        tokenized_datasets['''train'''], shuffle=snake_case_, collate_fn=snake_case_, batch_size=snake_case_ )
    A__ : List[Any] =DataLoader(
        tokenized_datasets['''validation'''], shuffle=snake_case_, collate_fn=snake_case_, batch_size=snake_case_ )
    A__ : List[str] =DataLoader(
        tokenized_datasets['''test'''], shuffle=snake_case_, collate_fn=snake_case_, batch_size=snake_case_ )
    return train_dataloader, eval_dataloader, test_dataloader
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> List[str]:
    """Run k-fold training of BERT on GLUE/MRPC and print per-epoch metrics,
    then the test metrics averaged over all folds.

    Upstream signature: (config, args) — ``config`` holds lr / num_epochs /
    seed / batch_size; ``args`` are the parsed CLI options."""
    # NOTE(review): this block looks machine-mangled — both parameters share
    # the name `snake_case_` (a SyntaxError), assignments bind throwaway
    # `A__` locals while later lines read the upstream names (`datasets`,
    # `kfold`, `accelerator`, `lr`, `num_epochs`, `seed`, `batch_size`,
    # `metric`, `gradient_accumulation_steps`, `model`, `optimizer`,
    # `lr_scheduler`, `test_predictions`, `test_references`, ...), and
    # `MAX_GPU_BATCH_SIZE` / `get_fold_dataloaders` are not defined under
    # those names in this file.  Verify against the original example.
    # New Code #
    A__ : Union[str, Any] =[]
    # Download the dataset
    A__ : Optional[int] =load_dataset('''glue''', '''mrpc''' )
    # Create our splits
    A__ : List[Any] =StratifiedKFold(n_splits=int(args.num_folds ) )
    # Initialize accelerator
    A__ : Dict =Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    A__ : Any =config['''lr''']
    A__ : Optional[Any] =int(config['''num_epochs'''] )
    A__ : Dict =int(config['''seed'''] )
    A__ : Tuple =int(config['''batch_size'''] )
    A__ : Union[str, Any] =evaluate.load('''glue''', '''mrpc''' )
    # If the batch size is too big we use gradient accumulation
    A__ : Any =1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        A__ : Optional[Any] =batch_size // MAX_GPU_BATCH_SIZE
        A__ : Any =MAX_GPU_BATCH_SIZE
    set_seed(snake_case_ )
    # New Code #
    # Create our folds:
    A__ : List[str] =kfold.split(np.zeros(datasets['''train'''].num_rows ), datasets['''train''']['''label'''] )
    A__ : Any =[]
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(snake_case_ ):
        A__ , A__ , A__ : str =get_fold_dataloaders(
            snake_case_, snake_case_, snake_case_, snake_case_, )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        A__ : Tuple =AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=snake_case_ )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        A__ : str =model.to(accelerator.device )
        # Instantiate optimizer
        A__ : Tuple =AdamW(params=model.parameters(), lr=snake_case_ )
        # Instantiate scheduler
        A__ : List[Any] =get_linear_schedule_with_warmup(
            optimizer=snake_case_, num_warmup_steps=1_0_0, num_training_steps=(len(snake_case_ ) * num_epochs) // gradient_accumulation_steps, )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        A__ , A__ , A__ , A__ , A__ : Tuple =accelerator.prepare(
            snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ )
        # Now we train the model
        for epoch in range(snake_case_ ):
            model.train()
            for step, batch in enumerate(snake_case_ ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                A__ : str =model(**snake_case_ )
                A__ : List[Any] =outputs.loss
                A__ : int =loss / gradient_accumulation_steps
                accelerator.backward(snake_case_ )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(snake_case_ ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    A__ : Dict =model(**snake_case_ )
                A__ : Any =outputs.logits.argmax(dim=-1 )
                A__ , A__ : List[str] =accelerator.gather_for_metrics((predictions, batch['''labels''']) )
                metric.add_batch(
                    predictions=snake_case_, references=snake_case_, )
            A__ : Dict =metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'epoch {epoch}:', snake_case_ )
        # New Code #
        # We also run predictions on the test set at the very end
        A__ : str =[]
        for step, batch in enumerate(snake_case_ ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                A__ : str =model(**snake_case_ )
            A__ : Dict =outputs.logits
            A__ , A__ : Any =accelerator.gather_for_metrics((predictions, batch['''labels''']) )
            fold_predictions.append(predictions.cpu() )
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu() )
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(snake_case_, dim=0 ) )
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    A__ : List[str] =torch.cat(snake_case_, dim=0 )
    A__ : Union[str, Any] =torch.stack(snake_case_, dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
    A__ : List[Any] =metric.compute(predictions=snake_case_, references=snake_case_ )
    accelerator.print('''Average test metrics from all folds:''', snake_case_ )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
    """CLI entry point: parse arguments and launch k-fold training."""
    # NOTE(review): `type=snake_case_` / `default=snake_case_` below are
    # undefined names (upstream: `type=str`, `default=None`, `type=int`),
    # and the final call reads `snake_case_` twice where upstream passes
    # `(config, args)` to `training_function` — identifiers look
    # machine-mangled; verify against the original example.
    A__ : int =argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''', type=snake_case_, default=snake_case_, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''', )
    parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''' )
    # New Code #
    parser.add_argument('''--num_folds''', type=snake_case_, default=3, help='''The number of splits to perform across the dataset''' )
    A__ : List[str] =parser.parse_args()
    # Fixed hyper-parameters for the demo run.
    A__ : Optional[Any] ={'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 1_6}
    training_function(snake_case_, snake_case_ )


if __name__ == "__main__":
    main()
| 416 | 1 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def UpperCAmelCase__(job):
    """Return timing info for one GitHub Actions workflow job.

    ``job`` is a job dict from the GitHub API with ISO-8601 ``started_at``
    and ``completed_at`` fields.  Returns
    ``{"started_at": ..., "completed_at": ..., "duration": <minutes>}``
    (the ``duration`` key is what the sorting code below reads).

    Fixes over the original block: the parameter was named
    ``lowerCamelCase_`` while the body read ``job``, and the computed
    values were bound to throwaway locals instead of dict entries, so an
    empty dict was returned.
    """
    job_info = {}
    start = job["started_at"]
    end = job["completed_at"]
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    # Wall-clock duration rounded to whole minutes.
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min
    return job_info
def UpperCAmelCase__ ( lowerCamelCase_ : List[str] , lowerCamelCase_ : Dict=None ):
    """Fetch timing info for every job of a GitHub Actions workflow run
    (paginated, 100 jobs per page); returns {job_name: job_info} or {} on
    any error.

    Upstream signature: (workflow_run_id, token=None)."""
    # NOTE(review): both parameters share the name `lowerCamelCase_` (a
    # SyntaxError), and assignments bind `__a` while later lines read
    # `token`, `headers`, `url`, `result`, `job_time` and `pages_to_iterate_over`
    # — identifiers look machine-mangled; verify against the original script.
    __a : Tuple = None
    if token is not None:
        # Authenticated requests get a higher rate limit.
        __a : List[Any] = {'Accept': 'application/vnd.github+json', 'Authorization': f'''Bearer {token}'''}
    __a : Dict = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
    __a : Any = requests.get(lowerCamelCase_ , headers=lowerCamelCase_ ).json()
    __a : List[str] = {}
    try:
        job_time.update({job['name']: extract_time_from_single_job(lowerCamelCase_ ) for job in result['jobs']} )
        # Remaining pages beyond the first 100 jobs.
        __a : Union[str, Any] = math.ceil((result['total_count'] - 1_0_0) / 1_0_0 )
        for i in range(lowerCamelCase_ ):
            __a : List[Any] = requests.get(url + f'''&page={i + 2}''' , headers=lowerCamelCase_ ).json()
            job_time.update({job['name']: extract_time_from_single_job(lowerCamelCase_ ) for job in result['jobs']} )
        return job_time
    except Exception:
        # Best-effort: log the failure and return an empty mapping.
        print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
        return {}
if __name__ == "__main__":
    # CLI: print each job of a workflow run with its duration, longest first.
    # NOTE(review): `get_job_time` is not defined under that name in this file
    # (the fetcher above is mangled to `UpperCAmelCase__`), and every local
    # here binds the same name `SCREAMING_SNAKE_CASE__` — verify upstream.
    SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
    SCREAMING_SNAKE_CASE__ = parser.parse_args()
    SCREAMING_SNAKE_CASE__ = get_job_time(args.workflow_run_id)
    # Sort jobs by duration, longest first.
    SCREAMING_SNAKE_CASE__ = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
    for k, v in job_time.items():
        print(F"{k}: {v['duration']}")
| 577 |
class _UpperCamelCase:
    """Flow network over an adjacency-matrix graph, merging multiple sources
    and sinks into a single super-source / super-sink when needed."""
    # NOTE(review): this block looks machine-mangled — all three methods below
    # `__init__` share the name `__lowerCAmelCase` (so later defs clobber the
    # earlier ones once mangled), each method's parameters share the name
    # `SCREAMING_SNAKE_CASE__` (a SyntaxError where repeated), assignments
    # bind throwaway `__a :` locals instead of `self.*` state, and
    # `sources is int` is only true when `sources` IS the type `int` itself
    # (upstream presumably isinstance).  Verify against the original
    # FlowNetwork implementation before relying on any of this.

    def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
        # Upstream: store graph, normalize sources/sinks, record vertex count.
        __a : List[Any] = None
        __a : Tuple = None
        __a : int = graph
        self._normalize_graph(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        __a : str = len(SCREAMING_SNAKE_CASE__ )
        __a : Optional[Any] = None

    def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
        # Normalize: wrap bare ints, then merge multiple sources/sinks by
        # adding a super-source row/column and a super-sink row/column whose
        # capacities equal the total input flow.
        if sources is int:
            __a : Union[str, Any] = [sources]
        if sinks is int:
            __a : Dict = [sinks]
        if len(SCREAMING_SNAKE_CASE__ ) == 0 or len(SCREAMING_SNAKE_CASE__ ) == 0:
            return
        __a : Union[str, Any] = sources[0]
        __a : Tuple = sinks[0]
        # make fake vertex if there are more
        # than one source or sink
        if len(SCREAMING_SNAKE_CASE__ ) > 1 or len(SCREAMING_SNAKE_CASE__ ) > 1:
            # Capacity of the super edges: sum of all source capacities.
            __a : List[Any] = 0
            for i in sources:
                max_input_flow += sum(self.graph[i] )
            __a : Optional[Any] = len(self.graph ) + 1
            for room in self.graph:
                room.insert(0 , 0 )
            self.graph.insert(0 , [0] * size )
            for i in sources:
                __a : List[str] = max_input_flow
            __a : str = 0
            __a : Optional[int] = len(self.graph ) + 1
            for room in self.graph:
                room.append(0 )
            self.graph.append([0] * size )
            for i in sinks:
                __a : List[Any] = max_input_flow
            __a : Union[str, Any] = size - 1

    def __lowerCAmelCase ( self : Union[str, Any] ):
        # Run the configured maximum-flow algorithm and return its result.
        if self.maximum_flow_algorithm is None:
            raise Exception('You need to set maximum flow algorithm before.' )
        if self.source_index is None or self.sink_index is None:
            return 0
        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any ):
        # Install a maximum-flow algorithm, instantiated over this network.
        __a : Any = algorithm(self )
class FlowNetworkAlgorithmExecutor:
    """Base class for algorithms executed over a `FlowNetwork`."""

    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticesCount
        self.source_index = flow_network.sourceIndex
        self.sink_index = flow_network.sinkIndex
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        """Run the algorithm once; repeated calls are no-ops."""
        if not self.executed:
            self._algorithm()
            self.executed = True

    def _algorithm(self):
        """Implementation hook for subclasses."""
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    """Executor base class for maximum-flow algorithms."""

    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        """Return the computed maximum flow; requires `execute()` to have run."""
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    """Push-relabel maximum-flow algorithm with the relabel-to-front rule."""

    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        # the source starts at maximum height
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        """Discharge `vertex_index`: push excess to lower neighbours, else relabel."""
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        """Push as much excess as the residual capacity allows."""
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        """Lift `vertex_index` just above its lowest admissible neighbour."""
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()

    print(f"maximum flow is {maximum_flow}")
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
):
    """Find a root of `function` (a sympy-parsable expression in `variable`)
    using the Newton-Raphson iteration starting from `starting_point`.

    `multiplicity` accelerates convergence for roots of known multiplicity.

    Raises:
        ZeroDivisionError: if the derivative vanishes at the current guess.
    """
    symbol = symbols(variable)
    func = lambdify(symbol, function)
    diff_function = lambdify(symbol, diff(function, symbol))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
# Demo: approximate several well-known roots with the helper above.
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
    # Find root of polynomial
    # Find fourth Root of 5
    print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
    # Find value of e
    print(
        """The root of log(y) - 1 = 0 is """,
        F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
    )
    # Exponential Roots
    print(
        """The root of exp(x) - 1 = 0 is""",
        F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
    )
    # Find root of cos(x)
    print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    """Task template describing the column layout of a text-classification dataset."""

    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the ClassLabel from `features`.

        Raises:
            ValueError: if `self.label_column` is missing from `features` or is not a ClassLabel.
        """
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # the dataclass is frozen, so write through __dict__ instead of setattr
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map dataset column names onto the template's canonical column names."""
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
"""simple docstring"""
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
):
    """Build the initial highway: one row where -1 marks an empty cell.

    Cars are placed every `frequency` cells (or at random gaps when
    `random_frequency` is set), each with `initial_speed` (or a random speed
    when `random_speed` is set). A negative `initial_speed` is clamped to 0.

    >>> construct_highway(10, 2, 6)
    [[6, -1, 6, -1, 6, -1, 6, -1, 6, -1]]
    """
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway
def get_distance(highway_now: list, car_index: int) -> int:
    """Count the empty cells in front of `highway_now[car_index]`,
    wrapping around to the start of the highway when the end is reached.

    NOTE(review): recurses forever on a fully empty highway — callers always
    place at least one car.
    """
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)
def update(highway_now: list, probability: float, max_speed: int) -> list:
    """Compute every car's next speed (Nagel-Schreckenberg rules) without moving it.

    Returns a new row where each occupied index holds the car's updated speed;
    empty cells stay -1. With probability `probability` a driver slows down.
    """
    number_of_cells = len(highway_now)
    # Before calculations, the next highway state is empty
    next_highway = [-1] * number_of_cells

    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            gap = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], gap)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway
def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    """Run the simulation for `number_of_update` steps.

    Each step appends the next highway state to `highway` (in place) and the
    accumulated history is returned.
    """
    number_of_cells = len(highway[0])

    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells

        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)

    return highway
if __name__ == "__main__":
    # Run the doctest examples embedded in the docstrings above.
    import doctest
    doctest.testmod()
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """Project Euler 174: count lamina tile-counts with a bounded number of layouts.

    For every hollow square lamina using at most `t_limit` tiles, tally how
    many distinct laminae produce each tile count, then return the number of
    tile counts realisable by between 1 and `n_limit` distinct laminae.
    """
    # tile count -> number of distinct laminae producing it
    count: defaultdict = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            # smallest hole keeping the tile count within t_limit
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # outer and hole widths must share parity for a centred hole
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
    # NOTE: runs with the default limits; this is a few seconds of pure-Python work.
    print(f"{solution() = }")
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: maps submodule name -> public names it provides.
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is missing: the modeling objects are simply not exposed
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers; runtime stays lazy.
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    """Stand-alone tests for the TF generation utility functions."""

    # tests whether the top_k_top_p_filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
            [
                [
                    8.2220991,  # 3rd highest value; idx. 0
                    -0.5620044,
                    5.23229752,
                    4.0386393,
                    -6.8798378,
                    -0.54785802,
                    -3.2012153,
                    2.92777176,
                    1.88171953,
                    7.35341276,  # 5th highest value; idx. 9
                    8.43207833,  # 2nd highest value; idx. 10
                    -9.85711836,
                    -5.96209236,
                    -1.13039161,
                    -7.1115294,
                    -0.8369633,
                    -5.3186408,
                    7.06427407,
                    0.81369344,
                    -0.82023817,
                    -5.9179796,
                    0.58813443,
                    -6.99778438,
                    4.71551189,
                    -0.18771637,
                    7.44020759,  # 4th highest value; idx. 25
                    9.38450987,  # 1st highest value; idx. 26
                    2.12662941,
                    -9.32562038,
                    2.35652522,
                ],  # cummulative prob of 5 highest values <= 0.6
                [
                    0.58425518,
                    4.53139238,
                    -5.57510464,
                    -6.28030699,
                    -7.19529503,
                    -4.02122551,
                    1.39337037,
                    -6.06707057,
                    1.59480517,
                    -9.643119,
                    0.03907799,
                    0.67231762,
                    -8.88206726,
                    6.27115922,  # 4th highest value; idx. 13
                    2.28520723,
                    4.82767506,
                    4.30421368,
                    8.8275313,  # 2nd highest value; idx. 17
                    5.44029958,  # 5th highest value; idx. 18
                    -4.4735794,
                    7.38579536,  # 3rd highest value; idx. 20
                    -2.91051663,
                    2.61946077,
                    -2.5674762,
                    -9.48959302,
                    -4.02922645,
                    -1.35416918,
                    9.67702323,  # 1st highest value; idx. 27
                    -5.89478553,
                    1.85370467,
                ],  # cummulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    """TF side of the framework-agnostic generation integration tests."""

    if is_tf_available():
        # Maps the mixin's framework-agnostic names onto their TF counterparts.
        # NOTE(review): the "...SeqaSeq"/"...VisionaSeq" spellings mirror this
        # file's imports; they look like mangled "2" names -- confirm upstream.
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }

    @slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            """Wraps the model so `generate` is exported with a fixed input length."""

            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            # the exported function must match eager generation for any batch size
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)

    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            """Wraps the model so `generate` is exported with a fixed batch size."""

            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            # the exported function must match eager generation for any input length
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)

    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                """Tokenize -> generate -> detokenize as a single Keras layer."""

                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)

    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        prompt = "Hello, my dog is cute and"
        tokens = tokenizer(prompt, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        class FakeBart(TFBartForConditionalGeneration):
            """Same model, but `call` tolerates an extra `foo` kwarg."""

            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            """Encoder whose `call` accepts **kwargs, defeating signature filtering."""

            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
# Conversion only copies weights; autograd is never needed, so disable it globally.
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    """Build (old_name, new_name) pairs mapping ViT-MSN checkpoint keys to HF ViT keys.

    When `base_model` is True the "vit." prefix is stripped and the
    classification-head entries are replaced by bare layernorm keys.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused qkv projection in `state_dict` into query/key/value entries (in place).

    The timm-style checkpoint stores attention input projections as a single
    matrix + bias; HF ViT expects separate query/key/value tensors.
    """
    for i in range(config.num_hidden_layers):
        # HF key prefix depends on whether we target the bare encoder or the full model
        prefix = "" if base_model else "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the classification-head weights from `state_dict` (in place).

    Missing keys are ignored (pop with a default never raises).
    """
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    """Drop the MSN projection-head weights from `state_dict` (in place).

    The projection head is only needed for self-supervised pre-training and is
    not part of the converted encoder. Missing keys are ignored.
    """
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move `dct[old]` to `dct[new]` (in place); raises KeyError if `old` is absent."""
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Convert an official ViT-MSN checkpoint into Hugging Face format.

    Downloads the checkpoint from `checkpoint_url`, maps its weights onto a
    `ViTMSNModel`, sanity-checks a forward pass against reference values, then
    saves the model and image processor to `pytorch_dump_folder_path`.
    """
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # Architecture hyper-parameters are inferred from the checkpoint name.
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    """
    Build the list of (old_key, new_key) pairs that map the original fairseq/DeiT
    encoder parameter names to the HF ViT encoder names.

    Args:
        encoder_config: ViT config; only `num_hidden_layers` is read here.
        decoder_config: TrOCR decoder config (unused here, kept for signature parity
            with the call site).

    Returns:
        list[tuple[str, str]]: the rename mapping.
    """
    rename_keys = []
    for i in range(encoder_config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight')
        )
        rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias'))
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight')
        )
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias')
        )
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight')
        )
        rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias'))
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight')
        )
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias')
        )
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight')
        )
        rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias'))

    # cls token, position embeddings and patch embeddings of encoder
    rename_keys.extend(
        [
            ('encoder.deit.cls_token', 'encoder.embeddings.cls_token'),
            ('encoder.deit.pos_embed', 'encoder.embeddings.position_embeddings'),
            ('encoder.deit.patch_embed.proj.weight', 'encoder.embeddings.patch_embeddings.projection.weight'),
            ('encoder.deit.patch_embed.proj.bias', 'encoder.embeddings.patch_embeddings.projection.bias'),
            ('encoder.deit.norm.weight', 'encoder.layernorm.weight'),
            ('encoder.deit.norm.bias', 'encoder.layernorm.bias'),
        ]
    )
    return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    """
    Split each fused qkv projection of the original encoder into separate
    query/key/value weight matrices, writing them into `state_dict` in place.

    Args:
        state_dict: mutable mapping of parameter name -> tensor; modified in place.
        encoder_config: config providing `num_hidden_layers` and `hidden_size`.
    """
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight')
        # fused weight is laid out as [q; k; v] along dim 0
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    """Move the value stored under key `old` to key `new` in `dct` (in place)."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    """
    Download a demo image matching the checkpoint flavour (handwritten vs printed).

    NOTE(review): if `checkpoint_url` matches neither branch, `url` is unbound and a
    NameError is raised — callers only pass handwritten/printed/stage1 URLs.
    """
    if "handwritten" in checkpoint_url:
        url = 'https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg'  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = 'https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'
    im = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original TrOCR fairseq weights into our VisionEncoderDecoderModel
    structure, verify the logits on a demo image, and save model + processor.

    Args:
        checkpoint_url: URL of the original `.pt` checkpoint.
        pytorch_dump_folder_path: folder where the converted model is written.
    """
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = 'relu'
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu', check_hash=True)['model']
    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith('decoder') and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained('roberta-large')
    processor = TrOCRProcessor(image_processor, tokenizer)
    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors='pt').pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    # stage1 checkpoints are pre-finetuning, so no reference logits exist for them
    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1E-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving processor to {pytorch_dump_folder_path}')
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: download an original TrOCR checkpoint and convert it.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--checkpoint_url',
        default='https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt',
        type=str,
        help='URL to the original PyTorch checkpoint (.pth file).',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    args = parser.parse_args()
    convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 363 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the WavLM sub-package: the config is always importable,
# the modeling classes only when torch is installed.
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy imports happen on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 99 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple

import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf

# NOTE(review): the line above appears garbled (duplicate/mangled names); the real
# classy_vision RegNet factories used by the conversion code are imported below.
from classy_vision.models.regnet import RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs

from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger()
@dataclass
class Tracker:
    """
    Records the leaf modules (plus Conv2d/BatchNorm2d) that actually run during a
    forward pass of `module`, by temporarily attaching forward hooks.
    """

    module: nn.Module
    # modules seen during the traced forward pass, in execution order
    traced: List[nn.Module] = field(default_factory=list)
    # hook handles, kept so they can be removed after tracing
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # a module with no submodules is a leaf; Conv2d/BatchNorm2d are kept explicitly
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        # detach all hooks again so tracing has no lasting side effect
        [h.remove() for h in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    """
    Transfer the weights of `src` to `dest` by tracing one forward pass through both
    modules and copying state dicts between the traced operations, position by position.
    """

    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    # module *types* to ignore on either side of the transfer
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """Run `x` through both modules and copy weights between matching operations."""
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f'Numbers of operations are different. Source module has {len(src_traced)} operations while'
                f' destination module has {len(dest_traced)}.'
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f'Transfered from={src_m} to={dest_m}')
class FakeRegNetVisslWrapper(nn.Module):
    """
    Wraps a vissl RegNet trunk so a plain forward pass returns its feature maps,
    mirroring how vissl itself collects them.
    """

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f'Unexpected layer name {k}'
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f'res{block_index}', v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        # out_feat_keys=None -> return all feature maps
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    """
    A dictionary with some additional logic: unknown keys fall back to a factory that
    creates the matching pretrained timm model.
    """

    def convert_name_to_timm(self, x: str) -> str:
        # e.g. "regnet-x-002" -> "regnetx_002"
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str):
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)

        return val
class NameToOurModelFuncMap(dict):
    """
    A dictionary with some additional logic: returns the correct Hugging Face RegNet
    class for a given checkpoint name.
    """

    def __getitem__(self, x: str):
        # seer checkpoints without an in1k head are plain backbones
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys):
    """
    Copy selected tensors from `from_state_dict` into `to_state_dict`.

    Args:
        from_state_dict: source mapping of name -> tensor.
        to_state_dict: destination mapping, updated in place.
        keys: iterable of (from_key, to_key) pairs to copy.

    Returns:
        the updated `to_state_dict`.
    """
    for from_key, to_key in keys:
        # clone so the destination does not alias the source tensor
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f'Copied key={from_key} to={to_key}')
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    """
    Convert one RegNet checkpoint: trace-transfer weights from the original model into
    the HF model, verify the outputs match, and optionally push to the hub.
    """
    print(f'Converting {name}...')
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        # NOTE(review): the mismatch flag was lost in the mangled source; False matches
        # the upstream conversion script (the 10B seer trace lengths differ).
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )
    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )
        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        print(f'Pushed {name}')
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """
    Convert one named (or every known) RegNet checkpoint to the HF format.

    Args:
        save_directory: local folder for downloads and converted weights.
        model_name: if given, only this architecture is converted.
        push_to_hub: whether to upload model + image processor after conversion.

    Returns:
        (config, expected_shape) — NOTE(review): as in the upstream script, `config`
        is only bound by the `else` loop; with a `model_name` given this relies on the
        name `config` existing, which is a pre-existing quirk.
    """
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    # RegNetConfig pre-filled with the ImageNet label mapping
    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "regnet-x-002": ImageNetPreTrainedConfig(
            depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type="x"
        ),
        "regnet-x-004": ImageNetPreTrainedConfig(
            depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type="x"
        ),
        "regnet-x-006": ImageNetPreTrainedConfig(
            depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type="x"
        ),
        "regnet-x-008": ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type="x"
        ),
        "regnet-x-016": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type="x"
        ),
        "regnet-x-032": ImageNetPreTrainedConfig(
            depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type="x"
        ),
        "regnet-x-040": ImageNetPreTrainedConfig(
            depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type="x"
        ),
        "regnet-x-064": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type="x"
        ),
        "regnet-x-080": ImageNetPreTrainedConfig(
            depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type="x"
        ),
        "regnet-x-120": ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type="x"
        ),
        "regnet-x-160": ImageNetPreTrainedConfig(
            depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type="x"
        ),
        "regnet-x-320": ImageNetPreTrainedConfig(
            depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type="x"
        ),
        # y variant
        "regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8),
        "regnet-y-004": ImageNetPreTrainedConfig(
            depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8
        ),
        "regnet-y-006": ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16
        ),
        "regnet-y-008": ImageNetPreTrainedConfig(
            depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16
        ),
        "regnet-y-016": ImageNetPreTrainedConfig(
            depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24
        ),
        "regnet-y-032": ImageNetPreTrainedConfig(
            depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1512], groups_width=24
        ),
        "regnet-y-040": ImageNetPreTrainedConfig(
            depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64
        ),
        "regnet-y-064": ImageNetPreTrainedConfig(
            depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72
        ),
        "regnet-y-080": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56
        ),
        "regnet-y-120": ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112
        ),
        "regnet-y-160": ImageNetPreTrainedConfig(
            depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112
        ),
        "regnet-y-320": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232
        ),
        # models created by SEER -> https://arxiv.org/abs/2202.08360
        "regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232),
        "regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328),
        "regnet-y-1280-seer": RegNetConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264
        ),
        "regnet-y-2560-seer": RegNetConfig(
            depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640
        ),
        "regnet-y-10b-seer": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010
        ),
        # finetuned on imagenet
        "regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232
        ),
        "regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328
        ),
        "regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264
        ),
        "regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
            depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640
        ),
        "regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010
        ),
    }

    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()

    # add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_a=1744, w_0=620.83, w_m=2.52))
        ),
    )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_a=1744, w_0=620.83, w_m=2.52))
        ),
    )

    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
    # CLI entry point: convert one or all RegNet checkpoints.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default=None,
        type=str,
        help=(
            'The name of the model you wish to convert, it must be one of the supported regnet* architecture,'
            ' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=Path,
        required=True,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument(
        '--push_to_hub',
        default=True,
        type=bool,
        required=False,
        help='If True, push model and image processor to the hub.',
    )
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 211 | 0 |
def simplify(current_set: list[list]) -> list[list]:
    """
    One step of Gaussian elimination: normalise each row by its leading term,
    subtract the first row from the others, and recurse on the remaining submatrix.

    >>> simplify([[1, 2, 3], [4, 5, 6]])
    [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]]
    """
    # Divide each row by magnitude of first term --> creates 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        # re-attach the first column and first row that were peeled off before recursing
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set
def solve_simultaneous(equations: list[list]) -> list:
    """
    Solve a system of n linear equations given as n lists of n+1 coefficients
    (the last entry of each row is the constant term).

    >>> solve_simultaneous([[1, 2, 3], [4, 5, 6]])
    [-1.0, 2.0]

    Raises:
        IndexError: if the input is empty or rows are not length n+1.
        ValueError: on non-numeric entries, or when every row contains a zero.
    """
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        # move a row with no zero coefficients to the front so elimination can start
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    # back-substitute from the last (smallest) row upwards
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # demo: 5x5 system plus the trivial single-equation case
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()

# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
# The old pickles reference `data_utils.Vocab` / `data_utils.Corpus` and the bare
# `data_utils` / `vocabulary` module names, so alias them to the current classes.
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    """Convert a Transformer-XL TF checkpoint and/or pre-processed corpus to PyTorch files.

    Bug fix: the original def used one duplicated parameter name (SyntaxError)
    and did not match the name the __main__ block calls
    (`convert_transfo_xl_checkpoint_to_pytorch`).

    Args:
        tf_checkpoint_path: optional path to a TensorFlow checkpoint to convert.
        transfo_xl_config_file: optional config JSON; empty string means default config.
        pytorch_dump_folder_path: destination folder for the converted files.
        transfo_xl_dataset_file: optional pickled corpus to convert to torch format.
    """
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        # The vocab is saved separately above; drop it from the dataset cache.
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    # Bug fix: the parser and the parsed args were bound to a throwaway name
    # while the lines below referenced `parser` / `args` (NameError at runtime);
    # stray dataset tokens after the final call were also removed.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        required=True,
        help='Path to the folder to store the PyTorch model or dataset/vocab.',
    )
    parser.add_argument(
        '--tf_checkpoint_path',
        default='',
        type=str,
        help='An optional path to a TensorFlow checkpoint path to be converted.',
    )
    parser.add_argument(
        '--transfo_xl_config_file',
        default='',
        type=str,
        help=(
            'An optional config json file corresponding to the pre-trained BERT model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--transfo_xl_dataset_file',
        default='',
        type=str,
        help='An optional dataset file to be converted in a vocabulary.',
    )
    args = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-import structure: maps each submodule name to the public names it provides.
# Bug fix: the structure was bound to a throwaway name that each optional block
# then *overwrote* (dropping earlier entries), and the final _LazyModule call
# referenced an undefined `_import_structure`; the lazy module was also never
# installed into sys.modules.
_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Vision-only symbols are registered under their own key, keeping the
    # configuration entries intact.
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""Project Euler 72: count the reduced proper fractions with denominator <= limit,
which equals the sum of Euler's totient phi(d) for d in [2, limit]."""


def solution(limit: int = 1_000_000) -> int:
    """Return sum(phi(d) for d in 2..limit) via a totient sieve.

    Bug fixes vs. the original: the function body referenced `limit` while the
    parameter had a different name (NameError), the def name did not match the
    `solution()` call below, and the inner sieve stepped by the wrong value.
    """
    # phi[i] starts at i - 1: correct for primes, fixed up below for composites.
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # untouched by any smaller prime => i is prime
            # Step by the prime i so every multiple of i is adjusted.
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    """Unit tests for the generation stopping-criteria helpers.

    Bug fixes vs. the original: every method shared one obfuscated name, so
    later defs silently shadowed earlier ones and `self._get_tensors` did not
    resolve; test methods are restored to `test_*` names so unittest discovers
    them, and the tensor helper uses `torch_device` instead of the length arg.
    """

    def _get_tensors(self, length):
        # Build a (batch, length) batch of random token ids plus uniform
        # per-position scores to feed the criteria.
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        # Backdating the start timestamp makes the time budget already spent.
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
def mf_knapsack(i, wt, val, j):
    """Top-down (memoized) 0/1 knapsack.

    Args:
        i: number of items considered (1-based prefix of wt/val).
        wt: item weights.
        val: item values.
        j: remaining capacity.

    Returns:
        Best achievable value using the first i items within capacity j.

    Bug fixes vs. the original: duplicated parameter names (SyntaxError) and
    the computed value was never written back into the memo table, so f[i][j]
    stayed -1.
    """
    global f  # a global dp table for knapsack, initialised by the caller
    if f[i][j] < 0:
        if j < wt[i - 1]:
            # Item i does not fit: best is the optimum over the first i-1 items.
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            # Either skip item i, or take it and add its value.
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = best
    return f[i][j]
def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack.

    Args:
        w: knapsack capacity.
        wt: item weights.
        val: item values.
        n: number of items.

    Returns:
        (optimal value, full dp table) where dp[i][c] is the best value using
        the first i items within capacity c.

    Bug fixes vs. the original: duplicated parameter names (SyntaxError), the
    table was bound to a throwaway name while the body used `dp`, and the
    return indexed with the leaked loop variable (undefined when w == 0).
    """
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                # Take item i if that beats leaving it out.
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w], dp
def knapsack_with_example_solution(w: int, wt: list, val: list):
    """Solve the integer-weight 0/1 knapsack and reconstruct one optimal subset.

    Args:
        w: knapsack capacity.
        wt: item weights (must be integers).
        val: item values (same length as wt).

    Returns:
        (optimal value, set of 1-based indices of one optimal item subset).

    Raises:
        ValueError: if wt/val are not list/tuple or differ in length.
        TypeError: if any weight is not an integer.

    Bug fixes vs. the original: duplicated parameter names (SyntaxError) and
    all local bindings collapsed onto one throwaway name (NameError on
    `num_items`, the error messages, and the dp table).
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set
def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    """Recursively reconstruct one optimal subset from a filled dp table,
    adding 1-based item indices to `optimal_set` in place.

    Bug fixes vs. the original: all five parameters shared one name
    (SyntaxError), leaving `dp`, `wt` and `optimal_set` undefined in the body.
    """
    # For the current item i at capacity j to be part of an optimal subset,
    # the optimal value at (i, j) must differ from the value at (i - 1, j),
    # i.e. from the optimum that ignores item i at the same capacity.
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            # Item i is not needed for this optimum.
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            # Item i is taken: record it and shrink the remaining capacity.
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
    # Bug fix: every binding below was collapsed onto one throwaway name, so
    # `val`, `wt`, `n`, `w`, the memo table `f` and `optimal_solution` were all
    # undefined at their use sites; stray dataset tokens were also removed.
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    # Memo table for mf_knapsack: row 0 is all zeros, remaining cells are -1
    # (meaning "not computed yet") except the zero-capacity column.
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print('optimal_value = ', optimal_solution)
    print('An optimal subset corresponding to the optimal value', optimal_subset)
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    """Builds a tiny Flaubert config plus matching inputs and exercises each
    TF model head, reporting assertions through the parent test case.

    Bug fixes vs. the original: no attribute was ever assigned to `self`
    (every assignment targeted a throwaway name, so e.g. `self.batch_size`
    raised AttributeError), method names were lost, and the class name did not
    match the `TFFlaubertModelTester(self)` reference in the test class below.
    Attribute and method names are reconstructed from their in-body usages.
    """

    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        # Also check the positional-list calling convention.
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        # Tile every tensor across the choice dimension: (batch, choices, seq).
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test suite for the TF Flaubert models.

    Bug fixes vs. the original: the mixin base classes and all method names
    were obfuscated (every test method shared one name, so only the last
    survived and none matched unittest's `test_*` discovery), and the two
    class-level flags lost their names.
    """

    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('''Fast''')
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    """Slow integration check against a published small Flaubert checkpoint.

    Bug fixes vs. the original: the class and method names were obfuscated
    (the method was not `test_*`-discoverable) and the dtype names were
    mangled (`tf.intaa` / `tf.floataa` are not TF dtypes); stray dataset
    tokens after the class were removed.
    """

    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8_76_87_73, -1.56_65_55, 0.27_07_24_18],
                    [-1.6_92_00_38, -0.5_87_35_05, 1.9_32_95_99],
                    [-2.9_56_39_85, -1.6_99_38_35, 1.7_97_20_52],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
UpperCamelCase__ = 'path-to-your-trained-model'
UpperCamelCase__ = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to('cuda')
UpperCamelCase__ = 'A photo of sks dog in a bucket'
UpperCamelCase__ = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('dog-bucket.png')
| 110 | 0 |
"""Convert an original PoolFormer checkpoint to the HuggingFace format."""
import argparse
import json
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
# Bug fix: the module logger was bound to a throwaway name while the code
# below calls `logger.info(...)` (NameError at runtime).
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Rewrite a state-dict key, subtracting `offset` from its block number.

    E.g. with offset 1: "x.3.0.mlp.fc1.weight" -> "x.block.2.0.output.conv1.weight".

    Bug fixes vs. the original: all four parameters shared one name
    (SyntaxError) and every local binding collapsed onto a throwaway name,
    leaving `original_name`, `key`, `orig_block_num` etc. undefined.
    """
    to_find = original_name.split('''.''')[0]
    key_list = key.split('''.''')
    # The block and layer numbers sit immediately before the matched component.
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"""{orig_block_num}.{layer_num}.{original_name}""", f"""block.{new_block_num}.{layer_num}.{new_name}""")
    return key
def rename_keys(state_dict):
    """Map original PoolFormer state-dict keys to the HuggingFace naming scheme.

    Bug fixes vs. the original: duplicated parameter name handling aside, every
    rebinding of `key` and the two counters targeted one throwaway name, so the
    renames never accumulated and `new_state_dict`/`patch_emb_offset` were
    undefined at their use sites.
    """
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith('''network'''):
            key = key.replace('''network''', '''poolformer.encoder''')
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('''bias''') and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('''proj''')]
            key = key.replace(to_replace, f"""patch_embeddings.{total_embed_found}.""")
            key = key.replace('''proj''', '''projection''')
            if key.endswith('''bias'''):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = '''poolformer.encoder.''' + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''mlp.fc1''', '''output.conv1''')
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''mlp.fc2''', '''output.conv2''')
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''norm1''', '''before_norm''')
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''norm2''', '''after_norm''')
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''layer_scale_1''', '''layer_scale_1''')
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''layer_scale_2''', '''layer_scale_2''')
        if "head" in key:
            key = key.replace('''head''', '''classifier''')
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """Download the standard COCO test image used to sanity-check conversions.

    Bug fixes vs. the original: the URL was bound to a throwaway name while
    the request referenced `url` via an undefined parameter, and the
    `stream=` argument lost its value (must be True to expose `.raw`).
    """
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Convert an original PoolFormer checkpoint, verify its logits and save it.

    Args:
        model_name: e.g. "poolformer_s12" — the last 3 chars select the variant.
        checkpoint_path: path to the original .pth file.
        pytorch_dump_folder_path: output folder for model + image processor.

    Bug fixes vs. the original: duplicated parameter names (SyntaxError) and
    every assignment — config attributes included — targeted a throwaway name,
    so no configuration was ever applied and most locals were undefined.
    Config attribute names below follow the PoolFormerConfig API; confirm
    against the transformers version in use.
    """
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = '''huggingface/label-files'''
    size = model_name[-3:]
    config.num_labels = 1000
    filename = '''imagenet-1k-id2label.json'''
    expected_shape = (1, 1000)

    # set config attributes
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1E-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1E-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1E-6
        crop_pct = 0.95
    else:
        raise ValueError(f"""Size {size} not supported""")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='''pt''').pixel_values

    logger.info(f"""Converting model {model_name}...""")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('''cpu'''))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors='''pt''').pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3_045, -0.6_758, -0.4_869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4_402, -0.1_374, -0.8_045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6_080, -0.5_133, -0.5_898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3_952, 0.2_263, -1.2_668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1_167, -0.0_656, -0.3_423])
    else:
        raise ValueError(f"""Size {size} not supported""")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1E-2)

    # finally, save model and image processor
    logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Bug fix: the parser and the parsed args were bound to a throwaway name
    # while the following lines referenced `parser` / `args` (NameError);
    # stray dataset tokens after the final call were also removed.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="poolformer_s12",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )

    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
import re
def split_input(str_: str) -> list:
    """Split on any non-alphanumeric separator, then whitespace-split each chunk.

    Bug fix vs. the original: the parameter had an obfuscated name while the
    body referenced `str_` (NameError).
    """
    return [char.split() for char in re.split(R'''[^ a-z A-Z 0-9 \s]''', str_)]
def to_simple_case(str_: str) -> str:
    """Capitalize every word and join without separators, e.g. 'one two' -> 'OneTwo'.

    Bug fix vs. the original: the parameter had an obfuscated name while the
    body called `split_input(str_)` (NameError).
    """
    string_split = split_input(str_)
    return "".join(
        [''''''.join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )
def to_complex_case(text: str, upper: bool, separator: str) -> str:
    """Join the words of `text` with `separator`, upper- or lower-casing them.

    Returns the sentinel "not valid string" for inputs that yield no words.

    Bug fix vs. the original: all three parameters shared one obfuscated name
    (SyntaxError) and the local results were never bound to `res_str`.
    """
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"
def to_pascal_case(str_: str) -> str:
    """PascalCase conversion, e.g. 'one two' -> 'OneTwo'.

    Bug fix vs. the original: every function in this module shared the same
    obfuscated name, so each def silently shadowed the previous one; the
    public name is restored (presumed from its behavior — confirm callers).
    """
    return to_simple_case(str_)
def to_camel_case(str_: str) -> str:
    """camelCase conversion: PascalCase with the first character lowered.

    Returns "not valid string" for inputs that produce an empty result.
    (Name restored from behavior — the original def shadowed its siblings.)
    """
    try:
        res_str = to_simple_case(str_)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"
def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : bool ):
'''simple docstring'''
return to_complex_case(_UpperCamelCase , _UpperCamelCase , '''_''' )
def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : bool ):
'''simple docstring'''
return to_complex_case(_UpperCamelCase , _UpperCamelCase , '''-''' )
if __name__ == "__main__":
__import__("doctest").testmod()
| 43 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class LMSDiscreteScheduler(metaclass=DummyObject):
    """Placeholder object that raises an informative ImportError (via
    `requires_backends`) whenever it is instantiated or loaded while the
    `torch` and `scipy` backends are unavailable.

    NOTE(review): the class and classmethod names were destroyed by the
    placeholder renaming; they are reconstructed here following the standard
    dummy-object layout (`__init__` / `from_config` / `from_pretrained`) —
    confirm against the real dummy-objects module.
    """

    # backends this dummy stands in for; checked by requires_backends
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
import numpy
class TwoHiddenLayerNeuralNetwork:
    """Fully-connected neural network with two hidden layers.

    Architecture: input -> 4-node hidden layer -> 3-node hidden layer ->
    1 output node, with sigmoid activations throughout and plain gradient
    descent on squared error.
    """

    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        """
        :param input_array: training inputs, shape (num_samples, num_features)
        :param output_array: expected outputs, shape (num_samples, 1)
        """
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is the number of nodes in the input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # First hidden layer has 4 nodes; second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Second hidden layer has 3 nodes; output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values; initially zeros until train() runs.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        """Propagate the inputs forward and return the output-layer activations."""
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer connecting the first hidden set of nodes with the second hidden set
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer connecting the second hidden layer with the output node
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        """Update all three weight matrices by gradient descent on squared error."""
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        """Run `iterations` rounds of feedforward + backprop.

        :param output: expected outputs, used only for loss reporting
        :param iterations: number of training rounds
        :param give_loss: when True, print the mean squared error per round
        """
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        """Forward-pass `input_arr` through the trained network and
        threshold the scalar output at 0.6, returning 0 or 1."""
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Logistic sigmoid, applied element-wise."""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of the sigmoid, expressed in terms of the sigmoid output."""
    return (value) * (1 - (value))


def example() -> int:
    """Train the network on a 3-bit truth table and predict for (1, 1, 1)."""
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output
    )
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    """Tests for DEISMultistepScheduler: save/load round-trips, config
    sweeps, scheduler-switching compatibility and full denoising loops."""

    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        """Return the default scheduler config, with overrides from kwargs."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """A scheduler built from `config` must survive a save/load
        round-trip: the reloaded scheduler has to step identically."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            # NOTE(review): assignment target reconstructed as `model_outputs`
            # per the multistep-scheduler test convention — confirm.
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # Save/load behavior is exercised by check_over_configs above.
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """The step outputs of a scheduler and its save/load copy must match
        for the given forward kwargs."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        """Run a 10-step denoising loop with the dummy model and return the
        final sample. A caller-provided scheduler is used as-is (the previous
        code rebuilt a default scheduler unconditionally, silently discarding
        the one passed in by test_switch)."""
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        # half-precision input must stay half-precision through the loop
        assert sample.dtype == torch.float16
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request

import requests
from bs4 import BeautifulSoup
A : Union[str, Any] = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Search Google Images for ``query`` and download up to ``max_images``
    full-resolution results into a ``query_<query>`` directory.

    :param query: search term (spaces allowed)
    :param max_images: maximum number of images to fetch (capped at 50)
    :return: the index reached while downloading, or 0 when nothing matched
    """
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get(
        "https://www.google.com/search",
        params=params,
        headers={
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
            " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
        },
    )
    soup = BeautifulSoup(html.text, "html.parser")
    # The image metadata lives in an inline AF_initDataCallback(...) script blob.
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )

    # Round-trip through JSON to unescape the blob into a plain string.
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    # Drop the low-resolution gstatic thumbnails, keeping only originals.
    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    # NOTE(review): if the full-resolution list is empty the final
    # `return index` would raise NameError in the original; guard it.
    index = 0
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        # URLs are double-escaped in the blob; decode twice.
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape"
        )
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape"
        )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index


if __name__ == "__main__":
    try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
        raise
'''simple docstring'''
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow LXMERT checkpoint into a PyTorch state dict.

    :param tf_checkpoint_path: path to the TensorFlow checkpoint
    :param config_file: JSON config describing the model architecture
    :param pytorch_dump_path: where to write the converted PyTorch model
    """
    # Initialise the PyTorch model from its JSON configuration.
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad (or truncate) each sequence to ``sequence_length``.

    When ``padding_value`` is a tuple (e.g. ``(-1, -1)`` for entity spans),
    the output has an extra trailing dimension of size 2.

    :param sequences: iterable of per-example sequences
    :param padding_value: scalar or 2-tuple used to fill the padding
    :param padding_side: "right" pads after the data, anything else pads before
    :param sequence_length: target length of every sequence
    :return: the padded batch as nested Python lists
    """
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            # NOTE(review): left-padding writes at offset len-1 rather than
            # sequence_length-len; reconstructed from the reference
            # implementation — confirm against the upstream example script.
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()
def is_punctuation(char: str) -> bool:
    """Return True if ``char`` is a punctuation character.

    The non-alphanumeric ASCII ranges are all treated as punctuation —
    characters such as "^", "$" and "`" are not in the Unicode P* categories
    but are handled as punctuation here — then the Unicode category check
    covers the rest.
    """
    cp = ord(char)
    # ASCII punctuation blocks: !-/ , :-@ , [-` , {-~
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """Data collator that dynamically pads the inputs plus the entity-level
    fields (labels, ner_tags, original_entity_spans) for LUKE token
    classification.

    Attributes:
        tokenizer: tokenizer whose ``pad`` method performs the input padding.
        padding / max_length / pad_to_multiple_of: forwarded to ``tokenizer.pad``.
        label_pad_token_id: id used to pad labels (-100 is ignored by PyTorch
            loss functions).
        return_tensors: framework of the returned tensors ("pt" dispatches
            DataCollatorMixin to ``torch_call``).
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        """Pad a list of feature dicts into a single batch of int64 tensors."""
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # tensor conversion is deferred until the labels are padded below
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        # every entity-level field is padded to the entity sequence length
        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    r"""
    Image processor that rescales pixel values and symmetrically pads images
    so that both spatial dimensions become multiples of ``pad_size``.

    Args:
        do_rescale (`bool`, defaults to `True`): whether to multiply by `rescale_factor`.
        rescale_factor (`float`, defaults to `1/255`): factor applied when rescaling.
        do_pad (`bool`, defaults to `True`): whether to pad to a multiple of `pad_size`.
        pad_size (`int`, defaults to `8`): window size the height/width are padded to.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale the image's pixel values by `scale` (delegates to the
        module-level ``rescale`` transform)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(
        self,
        image: np.ndarray,
        size: int,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Symmetrically pad the bottom/right of `image` so that height and
        width are the next multiples of `size`."""
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Prepare a (batch of) image(s): optional rescale, optional pad,
        then channel-format conversion; returns a BatchFeature."""
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
def __lowercase ( snake_case, snake_case, snake_case, snake_case ):
"""simple docstring"""
global f # a global dp table for knapsack
if f[i][j] < 0:
if j < wt[i - 1]:
__magic_name__ :Dict = mf_knapsack(i - 1, snake_case, snake_case, snake_case )
else:
__magic_name__ :Optional[Any] = max(
mf_knapsack(i - 1, snake_case, snake_case, snake_case ), mf_knapsack(i - 1, snake_case, snake_case, j - wt[i - 1] ) + val[i - 1], )
__magic_name__ :List[Any] = val
return f[i][j]
def __lowercase ( snake_case, snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :str = [[0] * (w + 1) for _ in range(n + 1 )]
for i in range(1, n + 1 ):
for w_ in range(1, w + 1 ):
if wt[i - 1] <= w_:
__magic_name__ :List[str] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_] )
else:
__magic_name__ :Optional[int] = dp[i - 1][w_]
return dp[n][w_], dp
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
if not (isinstance(snake_case, (list, tuple) ) and isinstance(snake_case, (list, tuple) )):
raise ValueError(
'''Both the weights and values vectors must be either lists or tuples''' )
__magic_name__ :Dict = len(snake_case )
if num_items != len(snake_case ):
__magic_name__ :str = (
'''The number of weights must be the same as the number of values.\n'''
f'''But got {num_items} weights and {len(snake_case )} values'''
)
raise ValueError(snake_case )
for i in range(snake_case ):
if not isinstance(wt[i], snake_case ):
__magic_name__ :str = (
'''All weights must be integers but got weight of '''
f'''type {type(wt[i] )} at index {i}'''
)
raise TypeError(snake_case )
__magic_name__ , __magic_name__ :Tuple = knapsack(snake_case, snake_case, snake_case, snake_case )
__magic_name__ :set = set()
_construct_solution(snake_case, snake_case, snake_case, snake_case, snake_case )
return optimal_val, example_optional_set
def __lowercase ( snake_case, snake_case, snake_case, snake_case, snake_case ):
"""simple docstring"""
if i > 0 and j > 0:
if dp[i - 1][j] == dp[i][j]:
_construct_solution(snake_case, snake_case, i - 1, snake_case, snake_case )
else:
optimal_set.add(snake_case )
_construct_solution(snake_case, snake_case, i - 1, j - wt[i - 1], snake_case )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [3, 2, 4, 4]
SCREAMING_SNAKE_CASE__ : Any = [4, 3, 2, 3]
SCREAMING_SNAKE_CASE__ : List[str] = 4
SCREAMING_SNAKE_CASE__ : Optional[int] = 6
SCREAMING_SNAKE_CASE__ : str = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("""optimal_value = """, optimal_solution)
print("""An optimal subset corresponding to the optimal value""", optimal_subset)
| 180 | 1 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    r"""
    TVLT audio feature extractor: converts raw waveforms into log-mel
    spectrograms plus an attention mask over the resulting audio patches.

    Args:
        spectrogram_length (`int`, defaults to 2048): max number of time frames kept.
        num_channels (`int`, defaults to 1): audio channels (mono only).
        patch_size (`List[int]`, defaults to `[16, 16]`): audio patch size.
        feature_size (`int`, defaults to 128): number of mel bins.
        sampling_rate (`int`, defaults to 44100): expected input sampling rate.
        hop_length_to_sampling_rate (`int`, defaults to 86): sampling_rate // this = hop length.
        n_fft (`int`, defaults to 2048): FFT window size.
        padding_value (`float`, defaults to 0.0): value used to pad the batch.
    """

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # number of patches along the frequency axis
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform):
        """Compute a log-mel spectrogram of ``waveform``, shifted and clipped
        into the range [-1, 1]."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one waveform or a batch of waveforms.

        Returns a BatchFeature with ``audio_values`` (padded log-mel
        spectrograms) and, when ``return_attention_mask`` is True,
        ``audio_mask`` (1 for real audio patches, 0 for padding).
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger; used by convert_poolformer_checkpoint below.
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Rename a checkpoint key of the form ``<block>.<layer>.<original_name>...``
    to ``block.<block - offset>.<layer>.<new_name>...``.

    ``offset`` compensates for the patch-embedding layers that are interleaved
    with the transformer blocks in the original PoolFormer numbering.
    """
    # Anchor on the first dotted component of the name being replaced,
    # e.g. "mlp" for original_name == "mlp.fc1".
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    # The block and layer indices immediately precede the anchor component.
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    return key.replace(
        f"{orig_block_num}.{layer_num}.{original_name}",
        f"block.{new_block_num}.{layer_num}.{new_name}",
    )
def rename_keys(state_dict):
    """Translate original PoolFormer state-dict keys to the HuggingFace naming scheme.

    Returns a new ``OrderedDict`` preserving the original key order.
    """
    new_state_dict = OrderedDict()
    # total_embed_found numbers the patch-embedding layers; patch_emb_offset counts
    # how many of them precede the current key so block indices can be shifted.
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """Download the standard COCO test image used to sanity-check conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True so PIL can read straight from the response body.
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Convert an original PoolFormer checkpoint to the HuggingFace format.

    Args:
        model_name: e.g. ``"poolformer_s12"``; the last three characters select the size.
        checkpoint_path: path to the original PyTorch ``.pth`` state dict.
        pytorch_dump_folder_path: output folder for the converted model and image processor.

    Raises:
        ValueError: if the size suffix is not one of s12/s24/s36/m36/m48.
    """
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes (ImageNet-1k label mapping)
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor (crop percentage differs per checkpoint size)
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    logger.info(f"Converting model {model_name}...")

    # load original state dict and rename its keys to the HF scheme
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass on the reference image
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits against the reference values recorded from the original model
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
_A = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 431 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

# File names used by the slow (vocab.txt) and fast (tokenizer.json) tokenizers.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-german-cased": (
            "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
        ),
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
        ),
    },
}

# All public DistilBERT checkpoints share a 512-token context window.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "distilbert-base-uncased": 512,
    "distilbert-base-uncased-distilled-squad": 512,
    "distilbert-base-cased": 512,
    "distilbert-base-cased-distilled-squad": 512,
    "distilbert-base-german-cased": 512,
    "distilbert-base-multilingual-cased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "distilbert-base-uncased": {"do_lower_case": True},
    "distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
    "distilbert-base-cased": {"do_lower_case": False},
    "distilbert-base-cased-distilled-squad": {"do_lower_case": False},
    "distilbert-base-german-cased": {"do_lower_case": False},
    "distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" DistilBERT tokenizer backed by the `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer when the serialized state disagrees with the
        # arguments given here (e.g. a checkpoint saved with different lowercasing).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build ``[CLS] A [SEP]`` (single sequence) or ``[CLS] A [SEP] B [SEP]`` (pair)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return 0s for the first sequence (incl. special tokens) and 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend vocabulary files into ``save_directory``."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 507 |
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
# WGS-84 ellipsoid parameters, in metres.
AXIS_A = 6_378_137.0  # semi-major (equatorial) axis
AXIS_B = 6_356_752.314_245  # semi-minor (polar) axis
EQUATORIAL_RADIUS = 6_378_137  # radius used to turn the haversine arc into a central angle
def lamberts_ellipsoidal_distance(lata: float, longa: float, lata_2: float, longa_2: float) -> float:
    """Approximate the surface distance (metres) between two lat/long points using
    Lambert's formula on the WGS-84 ellipsoid.

    Args:
        lata, longa: latitude/longitude of the first point, in degrees.
        lata_2, longa_2: latitude/longitude of the second point, in degrees.
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lata = atan((1 - flattening) * tan(radians(lata)))
    b_lata_2 = atan((1 - flattening) * tan(radians(lata_2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lata, longa, lata_2, longa_2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lata + b_lata_2) / 2
    q_value = (b_lata_2 - b_lata) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_demonimator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_demonimator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 507 | 1 |
"""simple docstring"""
from torch import nn
def get_activation(act_fn):
    """Map an activation-function name to a freshly constructed ``torch.nn`` module.

    Args:
        act_fn: one of ``"swish"``, ``"silu"``, ``"mish"`` or ``"gelu"``.

    Raises:
        ValueError: for any other name.
    """
    # "swish" is the historical name for SiLU.
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(F'Unsupported activation function: {act_fn}' )
| 388 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    """Configuration class for a TrajectoryTransformer model.

    Defaults mirror the CarlCochet/trajectory-transformer-halfcheetah-medium-v2 checkpoint.
    """

    model_type = "trajectory_transformer"
    # past_key_values is cache state, not a model output to compare at inference.
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50_256,
        eos_token_id=50_256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 388 | 1 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    """Read a dataset from JSON / JSON-Lines files via the packaged ``Json`` builder."""

    def __init__(
        self,
        path_or_paths,
        split=None,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        field=None,
        num_proc=None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        # Normalise a bare path into a {split: path} mapping for the builder.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        """Materialise the dataset: streaming iterable or regular map-style dataset."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class JsonDatasetWriter:
    """Serialise a ``Dataset`` to JSON (lines or other pandas orients), optionally in parallel."""

    def __init__(self, dataset, path_or_buf, batch_size=None, num_proc=None, **to_json_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        # Remaining kwargs are forwarded to pandas DataFrame.to_json.
        self.to_json_kwargs = to_json_kwargs

    def write(self):
        """Write the whole dataset; return the number of bytes written."""
        # path_or_buf is controlled by this writer, never by the caller's kwargs.
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        # JSON-Lines by default for the "records" orient.
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        # One batch -> encoded JSON bytes. `args` is a single tuple so the method
        # can be mapped across a multiprocessing pool.
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj, orient, lines, index, **to_json_kwargs):
        """Stream batches into ``file_obj``, serially or via a multiprocessing pool."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
| 474 |
'''simple docstring'''
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    """Render a benchmark-results JSON file as a collapsible Markdown table.

    Args:
        input_json_file: path to a JSON mapping benchmark name -> metric name ->
            {"new": float, optional "old": float, optional "diff": float}.
        output_md_file: path the Markdown report is written to.
    """
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        # Build one table per benchmark: header row, separator row, value row.
        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            # Non-numeric (or missing) values render as the literal "None".
            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
if __name__ == "__main__":
_UpperCAmelCase : str = sys.argv[1]
_UpperCAmelCase : Optional[Any] = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 474 | 1 |
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)

# Prefer the GPU when one is available; individual runs can override via --device.
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples,
    out_file,
    model_name,
    batch_size=8,
    device=DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
):
    """Run ``model.generate`` over ``examples`` and write one hypothesis per line to ``out_file``.

    Returns:
        dict with runtime metrics: ``n_obs``, ``runtime`` (seconds) and ``seconds_per_sample``.
    """
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
        # Flush once per chunk so partial results survive interruption.
        fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    """Return the current local time as ``YYYY-MM-DD HH:MM:SS`` (used to tag score files)."""
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    """CLI entry point: generate model outputs for an evaluation file and optionally score them.

    Unrecognised ``--key=value`` arguments are forwarded to ``model.generate``.

    Returns:
        The scores dict (empty when no ``--reference_path`` is given).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    # T5-style models expect a leading space before each source line.
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 353 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters for the Donut image-processor tests and builds
    the kwargs dict used to construct a ``DonutImageProcessor``."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        # Deliberately non-square default so (height, width) ordering bugs surface.
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        # None sentinels instead of mutable list defaults; stats default to 0.5 per channel.
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def prepare_image_processor_dict(self):
        """Return the kwargs for the ``DonutImageProcessor`` under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class __magic_name__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    """Test suite for ``DonutImageProcessor``.

    NOTE(review): upstream method names (``setUp``, ``image_processor_dict``,
    ``test_call_pil`` …) were machine-renamed to ``lowercase_``; in a class
    body later ``def``s shadow earlier ones, so only the last method survives
    at runtime, and ``self.image_processor_dict`` / ``self.image_processing_class``
    refer to those renamed members.  Method names are kept untouched here;
    only the broken local references (the undefined ``A_`` and the
    repeatedly-overwritten ``_lowercase`` locals) are repaired.
    """

    UpperCamelCase_ = DonutImageProcessor if is_vision_available() else None

    def lowercase_(self) -> Optional[int]:
        """setUp: create the shared tester helper read by the property below."""
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def lowercase_(self) -> int:
        """image_processor_dict: the tester's keyword arguments for the processor."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowercase_(self) -> Dict:
        """test_image_processor_properties: all expected attributes exist."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def lowercase_(self) -> List[Any]:
        """test_image_processor_from_dict_with_kwargs: ``size`` overrides behave."""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def lowercase_(self) -> Union[str, Any]:
        """Intentionally empty placeholder kept from upstream."""
        pass

    @is_flaky()
    def lowercase_(self) -> str:
        """test_call_pil: PIL inputs produce correctly shaped ``pixel_values``."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def lowercase_(self) -> Any:
        """test_call_numpy: numpy inputs produce correctly shaped ``pixel_values``."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def lowercase_(self) -> Tuple:
        """test_call_pytorch: torch inputs produce correctly shaped ``pixel_values``."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 353 | 1 |
import functools
def _lowerCAmelCase( __A , __A ):
UpperCAmelCase = len(_UpperCamelCase )
UpperCAmelCase = len(_UpperCamelCase )
@functools.cache
def min_distance(__A , __A ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_worda - indexa
# if second word index is overflow - delete all from the first word
if indexa >= len_worda:
return len_worda - indexa
UpperCAmelCase = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , _UpperCamelCase ) , 1 + min_distance(_UpperCamelCase , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
if __name__ == "__main__":
    # Run the module's doctests when executed directly as a script.
    import doctest

    doctest.testmod()
| 720 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __magic_name__ ( _snake_case , unittest.TestCase ):
    """Fast unit tests for ``KandinskyInpaintPipeline`` built from tiny dummy components.

    NOTE(review): this chunk appears machine-renamed — every local is called
    ``UpperCAmelCase`` (each assignment clobbers the previous one), most call
    arguments became the undefined ``lowerCAmelCase__``, and one method below
    even declares two parameters with the same name (a SyntaxError).  The
    comments record the apparent upstream intent; the code cannot run until
    those names are restored.
    """

    # Pipeline under test plus the parameter lists consumed by
    # ``PipelineTesterMixin`` (the mangled ``_snake_case`` base).
    UpperCAmelCase = KandinskyInpaintPipeline
    UpperCAmelCase = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
    UpperCAmelCase = [
        """prompt""",
        """negative_prompt""",
        """image_embeds""",
        """negative_image_embeds""",
        """image""",
        """mask_image""",
    ]
    UpperCAmelCase = [
        """generator""",
        """height""",
        """width""",
        """latents""",
        """guidance_scale""",
        """negative_prompt""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    UpperCAmelCase = False

    @property
    def _UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
        # text_embedder_hidden_size (upstream name; all dims are tiny on purpose)
        return 3_2

    @property
    def _UpperCamelCase ( self : int ) -> List[Any]:
        # time_input_dim
        return 3_2

    @property
    def _UpperCamelCase ( self : List[Any] ) -> List[Any]:
        # block_out_channels_0 mirrors the time input dim
        return self.time_input_dim

    @property
    def _UpperCamelCase ( self : Tuple ) -> Tuple:
        # time_embed_dim = 4 * time_input_dim
        return self.time_input_dim * 4

    @property
    def _UpperCamelCase ( self : Any ) -> Optional[int]:
        # cross_attention_dim
        return 1_0_0

    @property
    def _UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
        # dummy_tokenizer — NOTE(review): the local was renamed, so the
        # ``tokenizer`` read below is undefined as written.
        UpperCAmelCase = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
        return tokenizer

    @property
    def _UpperCamelCase ( self : int ) -> Dict:
        # dummy_text_encoder: a tiny seeded MultilingualCLIP
        torch.manual_seed(0 )
        UpperCAmelCase = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
        UpperCAmelCase = MultilingualCLIP(lowerCAmelCase__ )
        UpperCAmelCase = text_encoder.eval()
        return text_encoder

    @property
    def _UpperCamelCase ( self : Dict ) -> Optional[int]:
        # dummy_unet: 9 input channels (4 latent + 4 masked-image latent + 1 mask)
        torch.manual_seed(0 )
        UpperCAmelCase = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        UpperCAmelCase = UNetaDConditionModel(**lowerCAmelCase__ )
        return model

    @property
    def _UpperCamelCase ( self : str ) -> Optional[Any]:
        # dummy_movq_kwargs: tiny VQModel configuration
        return {
            "block_out_channels": [3_2, 6_4],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 1_2,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def _UpperCamelCase ( self : Dict ) -> List[Any]:
        # dummy_movq: seeded VQModel built from the kwargs above
        torch.manual_seed(0 )
        UpperCAmelCase = VQModel(**self.dummy_movq_kwargs )
        return model

    def _UpperCamelCase ( self : Tuple ) -> Any:
        # get_dummy_components: assemble the pipeline's constructor kwargs.
        # NOTE(review): every intermediate is clobbered into one name, so the
        # dict below reads names that were never bound.
        UpperCAmelCase = self.dummy_text_encoder
        UpperCAmelCase = self.dummy_tokenizer
        UpperCAmelCase = self.dummy_unet
        UpperCAmelCase = self.dummy_movq
        UpperCAmelCase = DDIMScheduler(
            num_train_timesteps=1_0_0_0 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , steps_offset=1 , prediction_type="epsilon" , thresholding=lowerCAmelCase__ , )
        UpperCAmelCase = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def _UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple=0 ) -> str:
        # get_dummy_inputs(device, seed=0).  NOTE(review): the two parameters
        # share one name — a SyntaxError — and the inputs dict reads names
        # (``init_image``, ``mask``, ``generator`` …) that were renamed away.
        UpperCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
        UpperCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowerCAmelCase__ )
        # create init_image
        UpperCAmelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
        UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert("RGB" ).resize((2_5_6, 2_5_6) )
        # create mask
        UpperCAmelCase = np.ones((6_4, 6_4) , dtype=np.floataa )
        UpperCAmelCase = 0
        if str(lowerCAmelCase__ ).startswith("mps" ):
            UpperCAmelCase = torch.manual_seed(lowerCAmelCase__ )
        else:
            UpperCAmelCase = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
        UpperCAmelCase = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 6_4,
            "width": 6_4,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def _UpperCamelCase ( self : Dict ) -> List[str]:
        # Main fast test: run the pipeline twice (object return and tuple
        # return) and compare a corner slice against recorded values.
        UpperCAmelCase = "cpu"
        UpperCAmelCase = self.get_dummy_components()
        UpperCAmelCase = self.pipeline_class(**lowerCAmelCase__ )
        UpperCAmelCase = pipe.to(lowerCAmelCase__ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        UpperCAmelCase = pipe(**self.get_dummy_inputs(lowerCAmelCase__ ) )
        UpperCAmelCase = output.images
        UpperCAmelCase = pipe(
            **self.get_dummy_inputs(lowerCAmelCase__ ) , return_dict=lowerCAmelCase__ , )[0]
        UpperCAmelCase = image[0, -3:, -3:, -1]
        UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}" )
        assert image.shape == (1, 6_4, 6_4, 3)
        UpperCAmelCase = np.array(
            [0.8_326_919, 0.73_790_467, 0.20_918_581, 0.9_309_612, 0.5_511_791, 0.43_713_328, 0.5_513_321, 0.49_922_934, 0.59_497_786] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def _UpperCamelCase ( self : str ) -> Tuple:
        # Batched-single vs batched outputs must agree within tolerance.
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
    """Slow GPU integration test: Kandinsky 2.1 inpainting vs. a recorded image.

    NOTE(review): locals were machine-renamed to ``UpperCAmelCase`` (each
    assignment clobbers the last) and the arguments to the final pipeline
    calls became the undefined ``lowerCAmelCase__``; the comments describe the
    upstream intent.
    """

    def _UpperCamelCase ( self : str ) -> str:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _UpperCamelCase ( self : Tuple ) -> int:
        # expected_image: the recorded fp16 reference output
        UpperCAmelCase = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
        # init_image: the source photo to inpaint
        UpperCAmelCase = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        # mask: ones everywhere, zeroed where the hat should be painted
        UpperCAmelCase = np.ones((7_6_8, 7_6_8) , dtype=np.floataa )
        UpperCAmelCase = 0
        UpperCAmelCase = "a hat"
        UpperCAmelCase = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
        pipe_prior.to(lowerCAmelCase__ )
        UpperCAmelCase = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa )
        UpperCAmelCase = pipeline.to(lowerCAmelCase__ )
        pipeline.set_progress_bar_config(disable=lowerCAmelCase__ )
        UpperCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
        # prior produces (image_embeds, zero/negative image_embeds)
        UpperCAmelCase , UpperCAmelCase = pipe_prior(
            lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        UpperCAmelCase = pipeline(
            lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , image_embeds=lowerCAmelCase__ , negative_image_embeds=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type="np" , )
        UpperCAmelCase = output.images[0]
        assert image.shape == (7_6_8, 7_6_8, 3)
        # mean pixel difference against the recorded reference
        assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
| 1 | 0 |
import sys
# The classic Project Euler #8 input: a 1000-digit number kept as a string so
# individual digits can be indexed.  (The variable name was machine-mangled;
# upstream calls this ``N``.  Annotation corrected: the value is a str.)
lowerCamelCase : str = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)
def _SCREAMING_SNAKE_CASE ( lowercase : str = N ):
'''simple docstring'''
lowerCamelCase_ = -sys.maxsize - 1
for i in range(len(lowercase ) - 12 ):
lowerCamelCase_ = 1
for j in range(13 ):
product *= int(n[i + j] )
if product > largest_product:
lowerCamelCase_ = product
return largest_product
if __name__ == "__main__":
    # The original printed ``solution()``, a name that does not exist in this
    # module (the entry point was machine-renamed); call the real function.
    print(f"{_SCREAMING_SNAKE_CASE() = }")
| 70 | '''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
# Absolute paths of the Flax example directories that ship the run_* scripts.
__lowerCAmelCase = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        "text-classification",
        "language-modeling",
        "summarization",
        "token-classification",
        "question-answering",
    ]
]
# The list above was bound to a machine-mangled name while the code below read
# ``SRC_DIRS``, which was never defined (NameError at import time); alias it so
# the example directories really end up on ``sys.path``.
SRC_DIRS = __lowerCAmelCase
sys.path.extend(SRC_DIRS)

if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_ta_mlm_flax

logging.basicConfig(level=logging.DEBUG)
# NOTE(review): the mangled name is reused here for the root logger, clobbering
# the directory list above (which survives via the SRC_DIRS alias).
__lowerCAmelCase : List[Any] = logging.getLogger()
def lowerCAmelCase ( ):
    """Return the value of the ``-f`` flag from the process command line.

    pytest/IPython pass ``-f <file>`` to test modules; this parses it out.
    The original assigned the parser and namespace to one clobbered local and
    then read the undefined names ``parser`` and ``args``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-f')
    args = parser.parse_args()
    return args.f
def lowerCAmelCase ( output_dir , split="eval" ):
    """Load ``{split}_results.json`` from *output_dir* and return it as a dict.

    Raises ValueError when the file does not exist.  The original declared two
    parameters with the same mangled name (a SyntaxError); they are restored
    to distinct names, keeping the ``"eval"`` default for the split.
    """
    path = os.path.join(output_dir, f"""{split}_results.json""")
    if os.path.exists(path):
        with open(path, 'r') as f:
            return json.load(f)
    raise ValueError(f"""can't find {path}""")
# Echo all log records to stdout.  The original attached an undefined
# ``stream_handler`` to an undefined ``logger`` (both names were lost in the
# machine renaming); attach the handler to the root logger directly instead.
__lowerCAmelCase : logging.StreamHandler = logging.StreamHandler(sys.stdout)
logging.getLogger().addHandler(__lowerCAmelCase)
class A ( UpperCAmelCase ):
    """End-to-end smoke tests for the Flax example scripts (GLUE, CLM, MLM, T5
    MLM, summarization, NER, QA), each run on tiny fixture data.

    NOTE(review): locals were machine-renamed to ``__UpperCAmelCase`` so every
    assignment clobbers the previous one; the argv f-strings read ``{tmp_dir}``
    and ``{epochs}``, which are never bound, ``patch.object(__a, 'argv', __a)``
    stands for ``patch.object(sys, 'argv', testargs)``, and ``get_results``
    /``result`` are likewise unresolved.  The comments record upstream intent.
    """

    def snake_case__ ( self : List[Any] ) -> Union[str, Any]:
        # run_flax_glue on the MRPC fixtures; expects >= 0.75 eval accuracy
        __UpperCAmelCase = self.get_auto_remove_tmp_dir()
        __UpperCAmelCase = f"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()
        with patch.object(__a , '''argv''' , __a ):
            run_flax_glue.main()
        __UpperCAmelCase = get_results(__a )
        self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )

    @slow
    def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
        # run_clm_flax on the sample corpus; perplexity must stay below 100
        __UpperCAmelCase = self.get_auto_remove_tmp_dir()
        __UpperCAmelCase = f"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()
        with patch.object(__a , '''argv''' , __a ):
            run_clm_flax.main()
        __UpperCAmelCase = get_results(__a )
        self.assertLess(result['''eval_perplexity'''] , 1_0_0 )

    @slow
    def snake_case__ ( self : Dict ) -> List[str]:
        # run_summarization_flax on the XSum fixtures; checks ROUGE floors
        __UpperCAmelCase = self.get_auto_remove_tmp_dir()
        __UpperCAmelCase = f"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            """.split()
        with patch.object(__a , '''argv''' , __a ):
            run_summarization_flax.main()
        __UpperCAmelCase = get_results(__a , split='''test''' )
        self.assertGreaterEqual(result['''test_rouge1'''] , 1_0 )
        self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
        self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
        self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )

    @slow
    def snake_case__ ( self : Any ) -> List[Any]:
        # run_mlm_flax on the sample corpus; perplexity must stay below 42
        __UpperCAmelCase = self.get_auto_remove_tmp_dir()
        __UpperCAmelCase = f"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            """.split()
        with patch.object(__a , '''argv''' , __a ):
            run_mlm_flax.main()
        __UpperCAmelCase = get_results(__a )
        self.assertLess(result['''eval_perplexity'''] , 4_2 )

    @slow
    def snake_case__ ( self : Dict ) -> str:
        # run_ta_mlm_flax (T5 span-corruption MLM); accuracy floor 0.42
        __UpperCAmelCase = self.get_auto_remove_tmp_dir()
        __UpperCAmelCase = f"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()
        with patch.object(__a , '''argv''' , __a ):
            run_ta_mlm_flax.main()
        __UpperCAmelCase = get_results(__a )
        self.assertGreaterEqual(result['''eval_accuracy'''] , 0.4_2 )

    @slow
    def snake_case__ ( self : Dict ) -> Tuple:
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        __UpperCAmelCase = 7 if get_gpu_count() > 1 else 2
        __UpperCAmelCase = self.get_auto_remove_tmp_dir()
        __UpperCAmelCase = f"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            """.split()
        with patch.object(__a , '''argv''' , __a ):
            run_flax_ner.main()
        __UpperCAmelCase = get_results(__a )
        self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
        self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )

    @slow
    def snake_case__ ( self : Optional[Any] ) -> List[Any]:
        # run_qa on the SQuAD v2 fixtures; F1 and exact-match floors of 30
        __UpperCAmelCase = self.get_auto_remove_tmp_dir()
        __UpperCAmelCase = f"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            """.split()
        with patch.object(__a , '''argv''' , __a ):
            run_qa.main()
        __UpperCAmelCase = get_results(__a )
        self.assertGreaterEqual(result['''eval_f1'''] , 3_0 )
        self.assertGreaterEqual(result['''eval_exact'''] , 3_0 )
| 262 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger (upstream name: ``logger``; the mangled variable is
# immediately shadowed by the pretrained-config map below).
lowerCAmelCase_ = logging.get_logger(__name__)

# Map of pretrained checkpoint id -> hosted config URL (upstream
# ``SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP``).
lowerCAmelCase_ = {
    'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _A ( _lowerCamelCase ):
    """Configuration for SEW-D models (upstream ``SEWDConfig``).

    Fixes applied: the original ``__init__`` declared every parameter as
    ``_A`` (a SyntaxError) and bound every incoming value to a throwaway
    local, so no attribute was ever stored and the ``inputs_to_logits_ratio``
    property below raised AttributeError on ``self.conv_stride``; parameter
    names and the ``self.*`` assignments are restored from the upstream
    signature (defaults unchanged).  A stray ``| 596 |`` fragment fused onto
    the final line has been removed.
    """

    # model identifier used by the auto classes (upstream: ``model_type``)
    _UpperCamelCase : Optional[int] = '''sew-d'''

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-7,
        feature_layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        # transformer encoder
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        # the three conv specs must describe the same number of layers
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect.'''
                '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
                f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
                f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def __a ( self ) -> int:
        """Total stride of the conv feature extractor (product of all conv strides)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Lazy import machinery (standard transformers sub-package __init__).
# Fixes applied: the original bound every value to one mangled name, so
# ``_import_structure`` was undefined when handed to ``_LazyModule``
# (NameError at import time), the torch-only model list never extended the
# structure, and the lazy module was never installed in ``sys.modules``.
# A stray ``| 596 | 1 |`` fragment fused onto the last line was removed.
_import_structure = {
    'configuration_altclip': [
        'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'AltCLIPConfig',
        'AltCLIPTextConfig',
        'AltCLIPVisionConfig',
    ],
    'processing_altclip': ['AltCLIPProcessor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # modeling code is only importable when torch is present
    _import_structure['modeling_altclip'] = [
        'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'AltCLIPPreTrainedModel',
        'AltCLIPModel',
        'AltCLIPTextModel',
        'AltCLIPVisionModel',
    ]

if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )
else:
    import sys

    # replace this module with a lazy proxy that imports submodules on demand
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def SCREAMING_SNAKE_CASE ( a_=None ):
    """Build the argument parser for the ``accelerate env`` command.

    *a_* is an optional argparse subparsers object (upstream: ``subparsers``);
    when given, the ``env`` command is registered on it, otherwise a
    standalone parser is returned.

    Fixes applied: the body tested an undefined name ``subparsers`` instead of
    the parameter, ``--config_file`` defaulted to the subparsers object rather
    than ``None``, and the ``Optional[Any]`` annotation referenced an
    unimported name (NameError at definition time).
    """
    if a_ is not None:
        parser = a_.add_parser('env')
    else:
        parser = argparse.ArgumentParser('Accelerate env command')

    parser.add_argument(
        '--config_file', default=None, help='The config file to use for the default values in the launching script.')
    if a_ is not None:
        # NOTE(review): upstream wires ``func=env_command`` here; the handler
        # in this file was machine-renamed, so the original's ``func=a_``
        # (the subparsers object) is preserved but must be re-wired once the
        # handler's name is restored.
        parser.set_defaults(func=a_)
    return parser
def SCREAMING_SNAKE_CASE ( a_ : str ):
    """Collect environment details for ``accelerate env`` and print a report.

    NOTE(review): all locals were machine-renamed to ``__a`` (each assignment
    clobbers the previous one) and the reads below (``pt_version``,
    ``pt_cuda_available``, ``accelerate_config``, ``info``) are never bound —
    upstream gave each value its own name.  ``a_`` is the parsed argparse
    namespace (upstream ``args``), despite the ``str`` annotation; ``args``
    itself is also unresolved here.
    """
    __a = torch.__version__  # pt_version
    __a = torch.cuda.is_available()  # pt_cuda_available
    __a = is_xpu_available()  # pt_xpu_available
    __a = is_npu_available()  # pt_npu_available
    __a = 'Not found'  # accelerate_config fallback
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(a_ ):
        __a = load_config_from_file(args.config_file ).to_dict()  # accelerate_config
    __a = {  # info: label -> value report table
        '`Accelerate` version': version,
        'Platform': platform.platform(),
        'Python version': platform.python_version(),
        'Numpy version': np.__version__,
        'PyTorch version (GPU?)': f"{pt_version} ({pt_cuda_available})",
        'PyTorch XPU available': str(a_ ),
        'PyTorch NPU available': str(a_ ),
        'System RAM': f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        __a = torch.cuda.get_device_name()  # upstream: info['GPU type']
    print('\nCopy-and-paste the text below in your GitHub issue\n' )
    print('\n'.join([f"- {prop}: {val}" for prop, val in info.items()] ) )
    print('- `Accelerate` default config:' if args.config_file is None else '- `Accelerate` config passed:' )
    __a = (  # pretty-printed accelerate config (dict -> indented lines)
        '\n'.join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()] )
        if isinstance(a_ , a_ )
        else f"\t{accelerate_config}"
    )
    print(a_ )
    __a = accelerate_config  # upstream: info['`Accelerate` configs'] = accelerate_config
    return info
def SCREAMING_SNAKE_CASE ( ):
    """CLI entry point: parse arguments and run the env command.

    NOTE(review): ``env_command_parser``/``parser``/``env_command`` do not
    exist under those names in this file (all three defs were renamed to
    ``SCREAMING_SNAKE_CASE``), so this raises NameError as written.
    """
    __a = env_command_parser()
    __a = parser.parse_args()
    env_command(a_ )
    return 0
if __name__ == "__main__":
    # NOTE(review): ``main`` was machine-renamed to ``SCREAMING_SNAKE_CASE``
    # above, so executing this module directly raises NameError here.
    raise SystemExit(main())
| 539 |
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    'files', [
        ['full:README.md', 'dataset_infos.json'],
        ['empty:README.md', 'dataset_infos.json'],
        ['dataset_infos.json'],
        ['full:README.md'],
    ], )
def SCREAMING_SNAKE_CASE(files, tmp_path_factory):
    """``DatasetInfosDict.from_directory`` reads dataset_size from README.md
    front-matter and/or the legacy ``dataset_infos.json``.

    Fixes applied: the original declared two parameters both named ``a_`` (a
    SyntaxError) and the body read ``files``/``dataset_infos_dir``/
    ``dataset_infos``, which were never bound; restored to the upstream pytest
    arguments (``files`` must match the parametrize id, ``tmp_path_factory``
    is the pytest fixture).
    """
    dataset_infos_dir = tmp_path_factory.mktemp('dset_infos_dir')
    if "full:README.md" in files:
        with open(dataset_infos_dir / 'README.md', 'w') as f:
            f.write('---\ndataset_info:\n  dataset_size: 42\n---')
    if "empty:README.md" in files:
        with open(dataset_infos_dir / 'README.md', 'w') as f:
            f.write('')
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / 'dataset_infos.json', 'w') as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    'dataset_info', [
        DatasetInfo(),
        DatasetInfo(
            description='foo', features=Features({'a': Value('int32' )} ), builder_name='builder', config_name='config', version='1.0.0', splits=[{'name': 'train'}], download_size=42, ),
    ], )
def SCREAMING_SNAKE_CASE(tmp_path, dataset_info):
    """A ``DatasetInfo`` round-trips through write_to_directory/from_directory.

    Fixes applied: duplicate ``a_`` parameters (a SyntaxError) restored to the
    upstream ``tmp_path`` fixture and the parametrized ``dataset_info``; the
    clobbered local now keeps the directory path so the reload and existence
    checks refer to defined names.
    """
    output_dir = str(tmp_path)
    dataset_info.write_to_directory(output_dir)
    reloaded = DatasetInfo.from_directory(output_dir)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(output_dir, 'dataset_info.json'))
assert os.path.exists(os.path.join(a_ , 'dataset_info.json' ) )
def SCREAMING_SNAKE_CASE():
    """``_to_yaml_dict`` keeps exactly the whitelisted keys and survives a YAML
    round-trip.

    Fixes applied: every local was clobbered into one mangled name while the
    assertions read ``dataset_info``/``dataset_info_yaml_dict`` etc., which
    were never bound; each value now has its own name again.
    """
    dataset_info = DatasetInfo(
        description='foo', citation='bar', homepage='https://foo.bar', license='CC0', features=Features({'a': Value('int32' )} ), post_processed={}, supervised_keys=(), task_templates=[], builder_name='builder', config_name='config', version='1.0.0', splits=[{'name': 'train', 'num_examples': 42}], download_checksums={}, download_size=1337, post_processing_size=442, dataset_size=1234, size_in_bytes=1337 + 442 + 1234, )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def SCREAMING_SNAKE_CASE():
    """An all-default ``DatasetInfo`` serializes to an empty YAML dict.

    Fixes applied: the original assigned both the info object and its YAML
    dict to one clobbered local, then asserted on an undefined name.
    """
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    'dataset_infos_dict', [
        DatasetInfosDict(),
        DatasetInfosDict({'default': DatasetInfo()} ),
        DatasetInfosDict({'my_config_name': DatasetInfo()} ),
        DatasetInfosDict(
            {
                'default': DatasetInfo(
                    description='foo', features=Features({'a': Value('int32' )} ), builder_name='builder', config_name='config', version='1.0.0', splits=[{'name': 'train'}], download_size=42, )
            } ),
        DatasetInfosDict(
            {
                'v1': DatasetInfo(dataset_size=42 ),
                'v2': DatasetInfo(dataset_size=1337 ),
            } ),
    ], )
def SCREAMING_SNAKE_CASE(tmp_path, dataset_infos_dict):
    """A ``DatasetInfosDict`` round-trips through a directory (README.md).

    Fixes applied: duplicate ``a_`` parameters (a SyntaxError) restored to the
    upstream ``tmp_path`` fixture and parametrized ``dataset_infos_dict``, and
    the clobbered loop locals restored so each entry is normalized to what the
    YAML representation can carry before comparing.
    """
    output_dir = str(tmp_path)
    dataset_infos_dict.write_to_directory(output_dir)
    reloaded = DatasetInfosDict.from_directory(output_dir)
    for config_name, dataset_info in dataset_infos_dict.items():
        # the config_name of the dataset_infos_dict take over the attribute
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(output_dir, 'README.md'))
| 539 | 1 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    """Unit tests for GenerationConfig (de)serialization and kwargs handling.

    Renamed from ``A``: a second ``class A`` below shadowed this one at module
    level, so none of these tests were discoverable. All five methods also
    shared a single name (only the last survived) and bound their locals to
    ``lowerCAmelCase_`` while reading the real names — both fixed here.
    """

    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        """Config values survive a save_pretrained/from_pretrained round trip."""
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        """A generation config built from a model config inherits its special tokens."""
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        """`.update()` applies known kwargs, leaves the input dict intact, returns the rest."""
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        """Ad-hoc attributes survive serialization but are not copied from model configs."""
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)
            new_config = GenerationConfig.from_pretrained(tmp_dir)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        """Constructor kwargs override defaults; from_pretrained kwargs override stored values."""
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class A(unittest.TestCase):
    """Staging-endpoint tests for pushing GenerationConfig to the Hub.

    The class fixtures were renamed by the obfuscation to a shared dummy name,
    so ``setUpClass``/``tearDownClass`` never ran and ``cls._token`` was never
    set; locals were also bound to ``lowerCAmelCase_`` while later lines read
    the real names. Both are restored here.
    """

    @classmethod
    def setUpClass(cls):
        # Runs once before any test in the class: authenticate against staging.
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup of the repos the tests may have created.
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        """Push under the user namespace via push_to_hub and save_pretrained."""
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        """Push under an organization namespace via push_to_hub and save_pretrained."""
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
| 325 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class A(unittest.TestCase):
    """Tests for AutoImageProcessor.from_pretrained resolution, errors, and registration.

    All methods previously shared one obfuscated name (only the last survived
    discovery and shadowed the rest), locals were bound to ``lowerCAmelCase_``
    while later lines read the real names, and the nested helper class extended
    an undefined base. Restored to runnable form with ``test_``-prefixed names.
    """

    def setUp(self):
        # Make remote-code confirmation prompts fail fast instead of blocking.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        image_processor = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(image_processor, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            image_processor = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(image_processor, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # The legacy feature_extractor_type key must still resolve to an image processor.
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            image_processor = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(image_processor, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            image_processor = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(image_processor, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            image_processor = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(image_processor, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # With no trust_remote_code, the zero timeout set in setUp makes this raise.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir)
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            # Always unregister so other tests see a clean mapping.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            # Marker attribute so the tests can tell the local class from the Hub one.
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 325 | 1 |
'''simple docstring'''
from collections import deque
from .hash_table import HashTable
class a_(HashTable):
    """Hash table with separate chaining: each occupied slot holds a deque of values.

    Fixes from the corrupted version: the base was the undefined name
    ``_lowerCAmelCase`` (the file only imports ``HashTable``); all three
    methods shared one name so none overrode the ``HashTable`` hooks; two
    methods declared the same parameter name twice (a SyntaxError); and
    ``_set_value`` stored the new chain in a local instead of the table.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Lazily create the chain for this slot, then prepend the new value.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        # Average remaining chain capacity per slot, scaled by the charge factor.
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        # Only treat the slot as a real collision once its chain is full AND
        # every slot in the table is occupied; otherwise keep chaining here.
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
| 172 |
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Map a GitHub Actions job payload to its timestamps and duration in minutes.

    Renamed from the duplicated obfuscated name: the next function (which was
    shadowing this one anyway) calls ``extract_time_from_single_job``. The body
    previously bound every value to ``lowercase_`` and returned the undefined
    name ``job_info``.
    """
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info
def get_job_time(workflow_run_id, token=None):
    """Fetch every job of a GitHub Actions workflow run and return {job name: timing info}.

    Renamed from the obfuscated name so the ``__main__`` block's existing
    ``get_job_time(...)`` call resolves. Locals were previously all bound to
    ``lowercase_`` while later lines read ``headers``/``url``/``result`` etc.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        # The first request already returned up to 100 jobs; fetch the rest page by page.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        # Deliberate best-effort: report the failure and fall through to an empty result.
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
    # CLI entry point: print per-job durations for one workflow run, longest first.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f"{k}: {v['duration']}")
| 172 | 1 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    """Holds image-processor kwargs and computes expected output sizes for the tests below.

    Renamed from ``snake_case``: the test class further down already
    instantiates ``DeformableDetrImageProcessingTester(self)``, and a second
    ``class snake_case`` shadowed this one anyway. ``__init__`` previously
    bound every argument to ``__A`` instead of storing it on ``self``, and the
    two helper methods shared one name.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to build a DeformableDetrImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor should resize the input(s) to.

        Shortest edge is scaled to size["shortest_edge"], aspect ratio kept.
        For a batch, each image is computed individually and the per-axis
        maxima are returned (the processor pads the batch to the largest).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class snake_case(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for DeformableDetrImageProcessor: properties, batching, COCO annotations.

    Restored from the corrupted version: the mixin base was the undefined name
    ``_lowerCAmelCase`` (the file imports ``ImageProcessingSavingTestMixin``);
    the class attribute the methods read (``self.image_processing_class``) was
    named ``A_``; every method shared one obfuscated name; and locals were
    bound to ``__A`` while later lines read the real names.
    """

    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 215 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
# Module-level logging setup for the conversion script.
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)  # module logger (obfuscated name kept; unseen code below may reference it)
def create_rename_keys(config, base_model=False):
    """Build (old_name, new_name) pairs mapping timm/DINO ViT weights to HF ViT names.

    Renamed from ``lowerCAmelCase``: all five helpers in this file shared that
    one name, so every definition but the last was shadowed. The body also
    initialized ``__A = []`` and then appended to the undefined ``rename_keys``.

    Args:
        config: object exposing ``num_hidden_layers``.
        base_model: if True, emit names for a headless base model (no "vit." prefix,
            layernorm mapped to the top-level encoder instead of a classifier head).

    Returns:
        list of (source_key, target_key) tuples.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """
    Split each timm-style fused qkv projection in ``state_dict`` into separate
    query/key/value weights and biases under the HuggingFace ViT key layout.

    Mutates ``state_dict`` in place. The fused matrix is laid out as
    [query; key; value] along dim 0, each slice of size ``config.hidden_size``.
    """
    for i in range(config.num_hidden_layers):
        # the "vit." prefix is only present on the classification model
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """
    Drop the classification-head weights from ``state_dict`` in place.

    Uses ``pop(key, None)`` so missing keys are ignored (base checkpoints
    may not carry a head at all).
    """
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move the value stored under ``old`` to ``new`` in ``dct`` (in place)."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """
    Download and return the standard COCO "two cats" image used to verify
    that the converted model matches the original one.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True so PIL can read straight from the response's raw file object
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """
    Copy/paste/tweak the original DINO checkpoint weights into our ViT
    structure, verify the outputs against the original torch-hub model on a
    test image, and save the converted model + image processor.

    Args:
        model_name: name of the DINO model on torch hub (e.g. ``dino_vitb16``).
        pytorch_dump_folder_path: output directory for the converted model.
        base_model: if True, convert only the backbone (no classification head).
    """
    # define default ViT configuration
    config = ViTConfig()
    # models whose name ends in "8" use an 8x8 patch size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture (the "small" variants use a narrower, 6-head encoder)
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        # the original backbone returns the final [CLS] hidden state
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="dino_vitb16",
        type=str,
        help="Name of the model trained with DINO you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--base_model",
        action="store_true",
        help="Whether to only convert the base model (no projection head weights).",
    )
    # convert the backbone only unless --base_model is explicitly toggled off elsewhere
    parser.set_defaults(base_model=True)
    args = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 215 | 1 |
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """
    Decorator factory: tag ``func`` as the handler for a single ``key`` by
    appending it to the function's ``handle_key`` list attribute.
    """
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator
def mark_multiple(*keys: str):
    """
    Decorator factory: tag ``func`` as the handler for several ``keys`` at
    once by extending the function's ``handle_key`` list attribute.
    """
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator
class KeyHandler(type):
    """
    Metaclass that collects methods tagged with a ``handle_key`` attribute
    (see ``mark``/``mark_multiple``) into a per-class ``key_handler`` dict and
    injects a ``handle_input`` classmethod-style dispatcher.
    """

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        # register every tagged method under each key it declares
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Read one key press and dispatch it to the registered handler, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            # remember which key triggered the handler
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def register(cls):
    """Rebuild ``cls`` through the ``KeyHandler`` metaclass so its tagged methods are registered."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
| 278 |
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
# Generic element type for LRUCache; must be named T to match its uses below.
T = TypeVar("T")
class LRUCache(Generic[T]):
    """
    Least-recently-used page cache backed by a deque (ordering) and a set
    (O(1) membership). The most recently referenced key sits at the left end.
    """

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            # n == 0 means "unbounded"
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Reference key ``x``, evicting the least recently used key if full."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            # already cached: move it to the front
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print the cached keys from most to least recently used."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # small smoke test mirroring the expected LRU ordering
    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 594 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for the CTRL BPE tokenizer (slow tokenizer only)."""

    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 192 | """simple docstring"""
import argparse
from collections import defaultdict
import yaml
_a : Tuple= "docs/source/en/_toctree.yml"
def clean_doc_toc(doc_list):
    """
    Clean one section of the documentation table of content: deduplicate
    entries sharing the same ``local`` path, sort alphabetically by title,
    and keep the "Overview" entry first.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in doc or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False):
    """
    Check (and optionally fix) the Schedulers section of the doc toctree.

    Raises ValueError if the section is not clean and ``overwrite`` is False.
    """
    # _a holds the path to the toctree YAML file (defined at module top)
    with open(_a, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1

    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(_a, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
def check_pipeline_doc(overwrite=False):
    """
    Check (and optionally fix) the Pipelines section of the doc toctree,
    including each pipeline's nested sub-sections.

    Raises ValueError if the section is not clean and ``overwrite`` is False.
    """
    # _a holds the path to the toctree YAML file (defined at module top)
    with open(_a, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1

    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(_a, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
| 192 | 1 |
def __snake_case ( __magic_name__ ):
'''simple docstring'''
if not isinstance(__magic_name__ , __magic_name__ ):
raise TypeError("only integers accepted as input" )
else:
lowercase = str(abs(__magic_name__ ) )
lowercase = [list(__magic_name__ ) for char in range(len(__magic_name__ ) )]
for index in range(len(__magic_name__ ) ):
num_transpositions[index].pop(__magic_name__ )
return max(
int("".join(list(__magic_name__ ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    __import__("doctest").testmod()
| 441 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
_snake_case : Optional[Any] = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
    """
    Configuration object for all attributes related to `bitsandbytes`
    quantization (8-bit LLM.int8() and 4-bit fp4/nf4).

    Args:
        load_in_8bit: enable 8-bit quantization.
        load_in_4bit: enable 4-bit quantization.
        llm_int8_threshold: outlier threshold for LLM.int8().
        llm_int8_skip_modules: list of module names to keep in full precision.
        llm_int8_enable_fp32_cpu_offload: allow fp32 CPU offload of some modules.
        llm_int8_has_fp16_weight: run LLM.int8() with fp16 main weights.
        bnb_4bit_compute_dtype: compute dtype for 4-bit layers (str, torch.dtype
            or None, which defaults to torch.float32).
        bnb_4bit_quant_type: "fp4" or "nf4".
        bnb_4bit_use_double_quant: enable nested quantization.
    """

    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            # e.g. "float16" -> torch.float16
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()

    def post_init(self):
        """Validate attribute types and the installed bitsandbytes version (4-bit only)."""
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")

        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")

        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")

        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")

        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")

        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")

        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")

        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
            )

    def is_quantizable(self):
        """Return True if any quantization mode is enabled."""
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        """Return the active quantization method name, or None if disabled."""
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        """Instantiate from a dict, applying any matching kwargs as overrides."""
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path):
        """Serialize this config to ``json_file_path`` as pretty-printed JSON."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self):
        """Return all attributes as a dict, with the compute dtype as a short string."""
        output = copy.deepcopy(self.__dict__)
        # "torch.float32" -> "float32"
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff=True):
        """Serialize to JSON; with ``use_diff`` only non-default values are included."""
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self):
        """Return only the attributes that differ from a default-constructed config."""
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
| 441 | 1 |
"""simple docstring"""
import math
import unittest
def is_prime(number: int) -> bool:
    """
    Return True if ``number`` is prime, using 6k +/- 1 trial division.

    The input contract (non-negative int) is enforced with ``assert``, so a
    negative or non-int input raises AssertionError, as the tests below expect.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class a__ ( unittest.TestCase ):
    """Unit tests for ``is_prime``."""

    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        # is_prime asserts its input contract, so negatives raise AssertionError
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
    # Discover and run the TestCase defined above when executed as a script.
    unittest.main()
| 439 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a__ ( UpperCamelCase_ , unittest.TestCase ):
snake_case__ = AudioLDMPipeline
snake_case__ = TEXT_TO_AUDIO_PARAMS
snake_case__ = TEXT_TO_AUDIO_BATCH_PARAMS
snake_case__ = frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def __UpperCamelCase ( self : int) -> Any:
"""simple docstring"""
torch.manual_seed(0)
_lowerCAmelCase:int = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') ,cross_attention_dim=(32, 64) ,class_embed_type='''simple_projection''' ,projection_class_embeddings_input_dim=32 ,class_embeddings_concat=a__ ,)
_lowerCAmelCase:Optional[int] = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule='''scaled_linear''' ,clip_sample=a__ ,set_alpha_to_one=a__ ,)
torch.manual_seed(0)
_lowerCAmelCase:Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=1 ,out_channels=1 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,)
torch.manual_seed(0)
_lowerCAmelCase:Dict = ClapTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,projection_dim=32 ,)
_lowerCAmelCase:str = ClapTextModelWithProjection(a__)
_lowerCAmelCase:Dict = RobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-roberta''' ,model_max_length=77)
_lowerCAmelCase:Union[str, Any] = SpeechTaHifiGanConfig(
model_in_dim=8 ,sampling_rate=1_6000 ,upsample_initial_channel=16 ,upsample_rates=[2, 2] ,upsample_kernel_sizes=[4, 4] ,resblock_kernel_sizes=[3, 7] ,resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] ,normalize_before=a__ ,)
_lowerCAmelCase:List[Any] = SpeechTaHifiGan(a__)
_lowerCAmelCase:Optional[Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''vocoder''': vocoder,
}
return components
def __UpperCamelCase ( self : List[Any] ,a__ : int ,a__ : Tuple=0) -> Optional[int]:
"""simple docstring"""
if str(a__).startswith('''mps'''):
_lowerCAmelCase:Tuple = torch.manual_seed(a__)
else:
_lowerCAmelCase:Dict = torch.Generator(device=a__).manual_seed(a__)
_lowerCAmelCase:Tuple = {
'''prompt''': '''A hammer hitting a wooden surface''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
}
return inputs
def __UpperCamelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
_lowerCAmelCase:str = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase:Dict = self.get_dummy_components()
_lowerCAmelCase:List[str] = AudioLDMPipeline(**a__)
_lowerCAmelCase:Optional[int] = audioldm_pipe.to(a__)
audioldm_pipe.set_progress_bar_config(disable=a__)
_lowerCAmelCase:str = self.get_dummy_inputs(a__)
_lowerCAmelCase:Optional[Any] = audioldm_pipe(**a__)
_lowerCAmelCase:List[str] = output.audios[0]
assert audio.ndim == 1
assert len(a__) == 256
_lowerCAmelCase:List[str] = audio[:10]
_lowerCAmelCase:Dict = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033])
assert np.abs(audio_slice - expected_slice).max() < 1E-2
def __UpperCamelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
_lowerCAmelCase:int = self.get_dummy_components()
_lowerCAmelCase:Optional[Any] = AudioLDMPipeline(**a__)
_lowerCAmelCase:Union[str, Any] = audioldm_pipe.to(a__)
_lowerCAmelCase:Any = audioldm_pipe.to(a__)
audioldm_pipe.set_progress_bar_config(disable=a__)
_lowerCAmelCase:Dict = self.get_dummy_inputs(a__)
_lowerCAmelCase:List[Any] = 3 * [inputs['''prompt''']]
# forward
_lowerCAmelCase:Dict = audioldm_pipe(**a__)
_lowerCAmelCase:Union[str, Any] = output.audios[0]
_lowerCAmelCase:Tuple = self.get_dummy_inputs(a__)
_lowerCAmelCase:Optional[Any] = 3 * [inputs.pop('''prompt''')]
_lowerCAmelCase:Tuple = audioldm_pipe.tokenizer(
a__ ,padding='''max_length''' ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=a__ ,return_tensors='''pt''' ,)
_lowerCAmelCase:Optional[int] = text_inputs['''input_ids'''].to(a__)
_lowerCAmelCase:List[Any] = audioldm_pipe.text_encoder(
a__ ,)
_lowerCAmelCase:int = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_lowerCAmelCase:List[Any] = F.normalize(a__ ,dim=-1)
_lowerCAmelCase:int = prompt_embeds
# forward
_lowerCAmelCase:Tuple = audioldm_pipe(**a__)
_lowerCAmelCase:Dict = output.audios[0]
assert np.abs(audio_a - audio_a).max() < 1E-2
def __UpperCamelCase ( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
_lowerCAmelCase:Any = self.get_dummy_components()
_lowerCAmelCase:str = AudioLDMPipeline(**a__)
_lowerCAmelCase:int = audioldm_pipe.to(a__)
_lowerCAmelCase:Any = audioldm_pipe.to(a__)
audioldm_pipe.set_progress_bar_config(disable=a__)
_lowerCAmelCase:Any = self.get_dummy_inputs(a__)
_lowerCAmelCase:Tuple = 3 * ['''this is a negative prompt''']
_lowerCAmelCase:str = negative_prompt
_lowerCAmelCase:List[Any] = 3 * [inputs['''prompt''']]
# forward
_lowerCAmelCase:Optional[Any] = audioldm_pipe(**a__)
_lowerCAmelCase:Any = output.audios[0]
_lowerCAmelCase:Tuple = self.get_dummy_inputs(a__)
_lowerCAmelCase:Tuple = 3 * [inputs.pop('''prompt''')]
_lowerCAmelCase:Tuple = []
for p in [prompt, negative_prompt]:
_lowerCAmelCase:str = audioldm_pipe.tokenizer(
a__ ,padding='''max_length''' ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=a__ ,return_tensors='''pt''' ,)
_lowerCAmelCase:Dict = text_inputs['''input_ids'''].to(a__)
_lowerCAmelCase:int = audioldm_pipe.text_encoder(
a__ ,)
_lowerCAmelCase:List[Any] = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_lowerCAmelCase:List[str] = F.normalize(a__ ,dim=-1)
embeds.append(a__)
_lowerCAmelCase , _lowerCAmelCase:Optional[Any] = embeds
# forward
_lowerCAmelCase:List[str] = audioldm_pipe(**a__)
_lowerCAmelCase:int = output.audios[0]
assert np.abs(audio_a - audio_a).max() < 1E-2
def __UpperCamelCase ( self : Optional[int]) -> int:
"""simple docstring"""
_lowerCAmelCase:Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase:Union[str, Any] = self.get_dummy_components()
_lowerCAmelCase:Union[str, Any] = PNDMScheduler(skip_prk_steps=a__)
_lowerCAmelCase:Union[str, Any] = AudioLDMPipeline(**a__)
_lowerCAmelCase:List[str] = audioldm_pipe.to(a__)
audioldm_pipe.set_progress_bar_config(disable=a__)
_lowerCAmelCase:Tuple = self.get_dummy_inputs(a__)
_lowerCAmelCase:int = '''egg cracking'''
_lowerCAmelCase:Union[str, Any] = audioldm_pipe(**a__ ,negative_prompt=a__)
_lowerCAmelCase:Dict = output.audios[0]
assert audio.ndim == 1
assert len(a__) == 256
_lowerCAmelCase:Optional[int] = audio[:10]
_lowerCAmelCase:Optional[Any] = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032])
assert np.abs(audio_slice - expected_slice).max() < 1E-2
def __UpperCamelCase ( self : Optional[Any]) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase:Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase:Optional[int] = self.get_dummy_components()
_lowerCAmelCase:str = PNDMScheduler(skip_prk_steps=a__)
_lowerCAmelCase:Any = AudioLDMPipeline(**a__)
_lowerCAmelCase:List[str] = audioldm_pipe.to(a__)
audioldm_pipe.set_progress_bar_config(disable=a__)
_lowerCAmelCase:Tuple = '''A hammer hitting a wooden surface'''
# test num_waveforms_per_prompt=1 (default)
_lowerCAmelCase:str = audioldm_pipe(a__ ,num_inference_steps=2).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
_lowerCAmelCase:Dict = 2
_lowerCAmelCase:List[str] = audioldm_pipe([prompt] * batch_size ,num_inference_steps=2).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
_lowerCAmelCase:Any = 2
_lowerCAmelCase:Tuple = audioldm_pipe(a__ ,num_inference_steps=2 ,num_waveforms_per_prompt=a__).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
_lowerCAmelCase:str = 2
_lowerCAmelCase:List[Any] = audioldm_pipe(
[prompt] * batch_size ,num_inference_steps=2 ,num_waveforms_per_prompt=a__).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def __UpperCamelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
_lowerCAmelCase:Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase:Dict = self.get_dummy_components()
_lowerCAmelCase:Optional[Any] = AudioLDMPipeline(**a__)
_lowerCAmelCase:List[str] = audioldm_pipe.to(a__)
audioldm_pipe.set_progress_bar_config(disable=a__)
_lowerCAmelCase:Optional[Any] = audioldm_pipe.vocoder.config.sampling_rate
_lowerCAmelCase:Tuple = self.get_dummy_inputs(a__)
_lowerCAmelCase:Optional[Any] = audioldm_pipe(audio_length_in_s=0.016 ,**a__)
_lowerCAmelCase:List[Any] = output.audios[0]
assert audio.ndim == 1
assert len(a__) / vocoder_sampling_rate == 0.016
_lowerCAmelCase:List[str] = audioldm_pipe(audio_length_in_s=0.032 ,**a__)
_lowerCAmelCase:Optional[int] = output.audios[0]
assert audio.ndim == 1
assert len(a__) / vocoder_sampling_rate == 0.032
def __UpperCamelCase ( self : List[Any]) -> int:
"""simple docstring"""
_lowerCAmelCase:Optional[Any] = self.get_dummy_components()
_lowerCAmelCase:Tuple = AudioLDMPipeline(**a__)
_lowerCAmelCase:Dict = audioldm_pipe.to(a__)
audioldm_pipe.set_progress_bar_config(disable=a__)
_lowerCAmelCase:Optional[int] = ['''hey''']
_lowerCAmelCase:List[str] = audioldm_pipe(a__ ,num_inference_steps=1)
_lowerCAmelCase:Tuple = output.audios.shape
assert audio_shape == (1, 256)
_lowerCAmelCase:Any = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
_lowerCAmelCase:Optional[Any] = SpeechTaHifiGan(a__).to(a__)
_lowerCAmelCase:int = audioldm_pipe(a__ ,num_inference_steps=1)
_lowerCAmelCase:List[str] = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def test_attention_slicing_forward_pass(self):
    """Attention slicing must not change outputs (audio pipeline: skip the
    mean-pixel-difference image check). Renamed from colliding `__UpperCamelCase`;
    the kwarg was the mangled placeholder `a__` instead of False."""
    self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)
def test_inference_batch_single_identical(self):
    """Batched and single inference must agree (audio pipeline: skip the
    mean-pixel-difference image check). Renamed from colliding `__UpperCamelCase`."""
    self._test_inference_batch_single_identical(test_mean_pixel_difference=False)
@unittest.skipIf(
    torch_device != "cuda" or not is_xformers_available(),
    reason="XFormers attention is only available with CUDA and `xformers` installed",
)
def test_xformers_attention_forwardGenerator_pass(self):
    """xFormers attention must reproduce the default attention output.
    Renamed from colliding `__UpperCamelCase`; kwarg restored to False."""
    self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class a__(unittest.TestCase):  # class name kept for backward compatibility (generated name)
    """Slow integration tests for AudioLDM against the `cvssp/audioldm` checkpoint.

    Fixes: all three methods were named `__UpperCamelCase` (so only the last
    existed, and `tearDown` never ran); `get_inputs` had duplicate `a__`
    parameters (a SyntaxError); `torch.floataa` does not exist (restored to
    `torch.float32`); locals/attribute targets restored to the names the
    bodies actually read.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        """Build deterministic pipeline kwargs (fixed latents + seeded generator)."""
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        """Default scheduler: check length and a fixed waveform slice."""
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        """LMS scheduler swapped in: check length and a fixed waveform slice."""
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
| 439 | 1 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    """Return True if num/den is a non-trivial digit-cancelling fraction.

    E.g. 49/98 "cancels" because striking the shared digit 9 leaves an equal
    fraction: 4/8 == 49/98. Caller must ensure den % 10 != 0 (division below).
    """
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list:
    """Return all curious (digit-cancelling) fractions as "num/den" strings.

    Fix: all three functions in this module were named `_snake_case` (shadowing
    each other) with duplicate `__lowercase` parameters (a SyntaxError), while
    the bodies call `is_digit_cancelling`/`fraction_list`/`solution` — the
    real names are restored.
    """
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1  # no effect on the for-loop variable; kept from the upstream algorithm
        den = 10
    return solutions


def solution(digit_len: int = 2) -> int:
    """Project Euler 33: denominator of the product of the curious fractions
    in lowest terms (expected answer: 100)."""
    result = 1.0
    for fraction in fraction_list(digit_len):
        frac = Fraction(fraction)
        # Multiply reciprocals so `result` ends as the lowest-terms denominator.
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
| 23 |
"""simple docstring"""
from __future__ import annotations
class IIRFilter:
    """N-order IIR filter (direct form I):

        a[0]*y[n] = b[0]*x[n] + ... + b[k]*x[n-k] - a[1]*y[n-1] - ... - a[k]*y[n-k]

    Defaults to an identity (pass-through) filter until coefficients are set.

    Fixes: both methods were named `lowerCamelCase_` (the setter was shadowed)
    with duplicate `snake_case` parameters (a SyntaxError), and `process`
    assigned the history updates to throwaway locals, so the filter state
    never advanced.
    """

    def __init__(self, order: int) -> None:
        self.order = order
        # a_{0} ... a_{k}: feedback (denominator) coefficients
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}: feedforward (numerator) coefficients
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list, b_coeffs: list) -> None:
        """Set coefficients; a missing leading a_0 defaults to 1.0.

        Raises:
            ValueError: if either list does not hold order+1 coefficients.
        """
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            error_msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(error_msg)
        if len(b_coeffs) != self.order + 1:
            error_msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(error_msg)
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        """Filter one sample and return the output, advancing the histories."""
        result = 0.0
        # Start at index 1 and apply the b[0]*x[n] term at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        # Shift histories one step; the newest value goes in front.
        self.input_history = [sample, *self.input_history[:-1]]
        self.output_history = [result, *self.output_history[:-1]]
        return result


# Backward-compatible alias for the previous machine-generated class name.
lowercase__ = IIRFilter
| 573 | 0 |
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
lowercase_ = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
lowercase_ = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
lowercase_ = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    """Fraction of predictions equal to the labels, as a Python float.

    Fix: the def used the colliding generated name `UpperCamelCase__` with
    duplicate `a__` parameters (a SyntaxError); callers in this file use
    `simple_accuracy(preds, labels)`.
    """
    return float((preds == labels).mean())
def acc_and_fa(preds, labels):
    """Accuracy together with the (binary) F1 score.

    Fix: renamed from the colliding generated name `UpperCamelCase__` (the
    metric class calls `acc_and_fa`); duplicate `a__` parameters were a
    SyntaxError. `fa_score` is this file's import alias for sklearn's f1_score.
    """
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }
def pearson_and_spearman(preds, labels):
    """Pearson and Spearman rank correlation between predictions and labels.

    Fix: renamed from the colliding generated name `UpperCamelCase__` (the
    metric class calls `pearson_and_spearman`); duplicate `a__` parameters
    were a SyntaxError.
    """
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SCREAMING_SNAKE_CASE(datasets.Metric):
    """GLUE metric: dispatches to the scorer matching the configured subset.

    Fixes: both methods were named `UpperCamelCase__` (so `_info` was shadowed
    and `datasets.Metric` could never find either hook), and `_compute` had
    duplicate `__A` parameters — a SyntaxError. Restored to the `_info`/
    `_compute` names the datasets.Metric API requires.
    """

    def _info(self):
        # Validate the requested GLUE subset before declaring feature types.
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # stsb is a regression task: float targets; all others are int labels.
                    'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
                    'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
                } ),
            codebase_urls=[],
            reference_urls=[],
            format='numpy',
        )

    def _compute(self, predictions, references):
        # cola -> Matthews corr; stsb -> Pearson/Spearman; mrpc/qqp -> acc + F1;
        # the remaining classification subsets -> plain accuracy.
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 704 | '''simple docstring'''
from __future__ import annotations
def peak(lst: list) -> int:
    """Return the peak of a unimodal (rise-then-fall) list in O(log n).

    Divide and conquer on the middle three elements.

    Fix: the def was named `UpperCamelCase__` while the recursive calls (and
    the body, which mixes `a__` and `lst`) use `peak`/`lst` — real names
    restored so the recursion actually resolves.

    >>> peak([1, 2, 3, 4, 5, 4, 3, 2, 1])
    5
    >>> peak([1, 10, 9, 8, 7, 6, 5, 4])
    10
    """
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on the right half
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # otherwise decreasing: recurse on the left half
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 58 | 0 |
"""simple docstring"""
def aliquot_sum(input_num: int) -> int:
    """Return the sum of the proper divisors of `input_num` (its aliquot sum).

    Fixes: the parameter was named `_UpperCAmelCase` while the body reads
    `input_num` (NameError), and the type check was `isinstance(x, x)` —
    restored to `isinstance(input_num, int)`.

    Raises:
        ValueError: if the input is not an int, or is not positive.
    """
    if not isinstance(input_num, int):
        raise ValueError('Input must be an integer')
    if input_num <= 0:
        raise ValueError('Input must be positive')
    # Proper divisors never exceed input_num // 2 (except the number itself).
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)


# Backward-compatible alias for the previous machine-generated name.
_SCREAMING_SNAKE_CASE = aliquot_sum


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 4 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
# Make torch/cuDNN ops deterministic so the hard-coded slice assertions below are reproducible.
enable_full_determinism()
class StableUnCLIPImgaImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast (tiny-model) tests for StableUnCLIPImgaImgPipeline.

    Fixes: the class shared the name `_snake_case` with the integration class
    below (shadowing it); all class attributes were named `UpperCamelCase__`
    and all methods `A__` (only the last of each survived, and the tester
    mixins look up `pipeline_class`/`params`/... and `get_dummy_components`/
    `get_dummy_inputs` by name); several locals the bodies read
    (`embedder_hidden_size`, `embedder_projection_dim`, ...) were undefined;
    duplicate `a__` parameters were SyntaxErrors. Attribute/method names are
    restored to what the mixins require.
    """

    pipeline_class = StableUnCLIPImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [])  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        """Build tiny randomly-initialized components for all pipeline slots."""
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                num_hidden_layers=5,
                num_attention_heads=4,
                image_size=32,
                intermediate_size=37,
                patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            # class embeddings: concatenated noise-level embedding + image embedding
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        """Seeded inputs; optionally converts the random tensor to a PIL image."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def test_image_embeds_none(self):
        """Pipeline must run when `image_embeds` is explicitly passed as None."""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        # max-difference check is only reliable on deterministic devices
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImgaImgPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against the pretrained stable-unclip checkpoints.

    Fixes: the class shared the name `_snake_case` with the fast-test class
    above; all methods were named `A__` (so `tearDown` never ran and only the
    last test existed); `torch.floataa` does not exist (restored to
    `torch.float16`, matching the `*_fp16.npy` reference outputs).
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy")

        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy")

        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        """Peak GPU memory with sequential CPU offload must stay under 7 GB."""
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(input_image, "anime turtle", num_inference_steps=2, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 413 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
# Module-level logger for this image-processing module.
lowerCamelCase__ = logging.get_logger(__name__)
# PIL is only needed for PIL.Image return types; import lazily behind the guard.
if is_vision_available():
    import PIL
class snake_case__(BaseImageProcessor):
    """CLIP-style image processor: resize (shortest edge) -> center crop ->
    rescale -> normalize, with optional RGB conversion.

    Fixes: the base class was the undefined name `lowercase_` (this file
    imports `BaseImageProcessor`); every method was named `__lowercase`
    (colliding, while `preprocess` calls `self.resize`/`self.center_crop`/
    `self.rescale`/`self.normalize`); all signatures had duplicate `a__`
    parameters — SyntaxErrors. Real method and parameter names restored;
    trailing extraction junk removed from the last line.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"""shortest_edge""": 224}
        # shortest-edge sizes must not be squared by get_size_dict
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="""crop_size""")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        """Resize so the shortest edge equals size["shortest_edge"], keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image, size=size["""shortest_edge"""], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        """Center-crop to (size["height"], size["width"])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''')
        return center_crop(image, size=(size["""height"""], size["""width"""]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        """Normalize with per-channel mean/std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        do_convert_rgb=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the full pipeline; per-call arguments override the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="""size""", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="""crop_size""", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""")

        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""")

        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""")

        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"""pixel_values""": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable

# Maps submodule name -> public names it exports; consumed by _LazyModule.
# Fix: every assignment here used the single name `lowerCamelCase__`, so the
# dict was clobbered by the list assignments and `_import_structure` (referenced
# at the bottom) was never defined. Restored the conventional lazy-init layout.
_import_structure = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""feature_extraction_dpt"""] = ["""DPTFeatureExtractor"""]
    _import_structure["""image_processing_dpt"""] = ["""DPTImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_dpt"""] = [
        """DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """DPTForDepthEstimation""",
        """DPTForSemanticSegmentation""",
        """DPTModel""",
        """DPTPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static type checkers see real imports; at runtime the lazy module is used.
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 291 | 0 |
"""simple docstring"""
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """Run `check_program` in a sandboxed subprocess with a time limit.

    Fix: every function in this module was named `lowerCamelCase_` (shadowing
    each other), `Process(target=...)` pointed at a local instead of
    `unsafe_execute`, and `reliability_guard` assigned all the disabled
    functions to throwaway locals instead of patching the modules. Names and
    assignment targets restored to the upstream OpenAI human-eval layout.
    """
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }


def unsafe_execute(check_program, result, timeout):
    """Child-process body: exec the program under I/O/time/FS restrictions."""
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"""failed: {e}""")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir


@contextlib.contextmanager
def time_limit(seconds):
    """Raise TimeoutException if the enclosed block runs longer than `seconds`."""
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        # Always cancel the pending alarm.
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    """Silence stdout/stderr and make stdin unreadable inside the block."""
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    """Run the block inside a fresh temporary working directory."""
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname


class TimeoutException(Exception):
    """Raised by time_limit when the alarm fires."""
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"


@contextlib.contextmanager
def chdir(root):
    """Temporarily change the working directory to `root` ("." is a no-op)."""
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)


def reliability_guard(maximum_memory_bytes=None):
    """Disable destructive interpreter/OS facilities in the current process.

    WARNING: this is a best-effort guard for running generated code, NOT a
    security sandbox. Call only inside a throwaway subprocess.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    # Block common escape hatches / heavyweight modules.
    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class FlaxUNetaDConditionOutput(BaseOutput):
    """Output of the Flax conditional UNet below.

    The original class was renamed to a placeholder (`a`) that the model's
    `__call__` never used — it returns ``FlaxUNetaDConditionOutput(sample=...)``
    — so the name and the ``sample`` field are restored from that call site.
    The base class is the imported ``BaseOutput`` (the placeholder base name
    was undefined).
    """

    # Predicted sample, shaped like the model input (NCHW) — see `__call__`.
    sample: jnp.ndarray
@flax_register_to_config
class a(nn.Module, FlaxModelMixin, ConfigMixin):
    r"""Conditional 2D UNet (Flax): denoises a latent sample conditioned on a
    timestep and encoder hidden states (e.g. text embeddings).

    NOTE(review): in the original, every config field below carried the same
    mangled placeholder name (so all defaults collapsed onto one attribute and
    the ``self.<field>`` reads in the methods failed), both methods shared one
    mangled name (shadowing each other — Flax requires the literal name
    ``setup``), three tuple-unpack targets carried annotations (a SyntaxError),
    and ``jnp.floataa``/``jnp.intaa`` are not jax names.  Field, method and
    local names are restored from the in-body references; the mixin bases are
    the ones this file imports.
    """

    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False

    def init_weights(self, rng):
        """Initialise the parameter PyTree with dummy inputs of the configured shapes."""
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]

    def setup(self):
        """Build the input conv, time embedding, down/mid/up blocks and output head."""
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                '''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types )

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
            down_blocks.append(down_block )
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        reversed_num_attention_heads = list(reversed(num_attention_heads ) )
        only_cross_attention = list(reversed(only_cross_attention ) )
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types ):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1 )]
            is_final_block = i == len(block_out_channels) - 1
            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlockaD(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            else:
                up_block = FlaxUpBlockaD(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype, )
            up_blocks.append(up_block )
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5 )
        self.conv_out = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )

    def __call__( self, sample, timesteps, encoder_hidden_states, down_block_additional_residuals=None, mid_block_additional_residual=None, return_dict = True, train = False, ):
        """Run the UNet.

        Args:
            sample: input latents, NCHW.
            timesteps: scalar or 1-D array of diffusion timesteps.
            encoder_hidden_states: conditioning sequence (e.g. text embeddings).
            down_block_additional_residuals / mid_block_additional_residual:
                optional ControlNet-style residuals added to the skip/mid paths.
            return_dict: return a `FlaxUNetaDConditionOutput` instead of a tuple.
            train: enable dropout (passed as `deterministic=not train`).
        """
        # 1. time — promote a python scalar / 0-d array to a 1-element array
        if not isinstance(timesteps, jnp.ndarray ):
            timesteps = jnp.array([timesteps], dtype=jnp.int32 )
        elif isinstance(timesteps, jnp.ndarray ) and len(timesteps.shape ) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32 )
            timesteps = jnp.expand_dims(timesteps, 0 )
        t_emb = self.time_proj(timesteps )
        t_emb = self.time_embedding(t_emb )

        # 2. pre-process — Flax convs are NHWC, the public interface is NCHW
        sample = jnp.transpose(sample, (0, 2, 3, 1) )
        sample = self.conv_in(sample )

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlockaD ):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train )
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train )
            down_block_res_samples += res_samples
        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train )
        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up — consume the skip connections in reverse order
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlockaD ):
                sample = up_block(
                    sample, temb=t_emb, encoder_hidden_states=encoder_hidden_states, res_hidden_states_tuple=res_samples, deterministic=not train, )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train )

        # 6. post-process — back to NCHW
        sample = self.conv_norm_out(sample )
        sample = nn.silu(sample )
        sample = self.conv_out(sample )
        sample = jnp.transpose(sample, (0, 3, 1, 2) )
        if not return_dict:
            return (sample,)
        return FlaxUNetaDConditionOutput(sample=sample )
| 202 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """Output of the Karras-Ve scheduler's `step`/`step_correct`.

    The original class name was a placeholder that the scheduler below never
    used — it returns ``KarrasVeOutput(prev_sample=..., derivative=...,
    pred_original_sample=...)`` — so the name and field names are restored
    from those call sites; the base is the imported ``BaseOutput``.
    """

    # Sample at the previous timestep (the next input to the denoising loop).
    prev_sample: torch.FloatTensor
    # Derivative of the predicted original sample (Karras et al. "d_i").
    derivative: torch.FloatTensor
    # Denoised ("x0") prediction, when available.
    pred_original_sample: Optional[torch.FloatTensor] = None
class _UpperCamelCase(SchedulerMixin, ConfigMixin):
    """Stochastic sampling scheduler from Karras et al. (2022), Algorithm 2.

    NOTE(review): in the original, every ``__init__`` parameter and every
    method shared one mangled placeholder name each (duplicate parameters are
    a SyntaxError, and the five identically-named methods shadowed each
    other).  Parameter and method names are restored from the body references
    (``self.config.sigma_max`` etc.) and the SchedulerMixin API; the bases are
    the mixins this file imports.
    """

    # Second-order scheduler: `step` is the Euler step, `step_correct` the correction.
    order = 2

    @register_to_config
    def __init__( self, sigma_min: float = 0.02, sigma_max: float = 1_0_0, s_noise: float = 1.007, s_churn: float = 8_0, s_min: float = 0.05, s_max: float = 5_0, ):
        """Store the Karras noise-schedule hyperparameters (via `register_to_config`)."""
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)

    def scale_model_input( self, sample: torch.FloatTensor, timestep: Optional[int] = None ):
        """No-op scaling hook required by the scheduler interface."""
        return sample

    def set_timesteps( self, num_inference_steps: int, device: Union[str, torch.device] = None ):
        """Precompute the (descending) timestep indices and their sigma schedule."""
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps )[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps ).to(device )
        # Geometric interpolation between sigma_max and sigma_min.
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device )

    def add_noise_to_input( self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None ):
        """Explode the noise level from sigma to sigma_hat (gamma > 0 only inside [s_min, s_max])."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1 )
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator ).to(sample.device )
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step( self, model_output: torch.FloatTensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.FloatTensor, return_dict: bool = True, ):
        """First-order (Euler) update from sigma_hat to sigma_prev."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample )

    def step_correct( self, model_output: torch.FloatTensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.FloatTensor, sample_prev: torch.FloatTensor, derivative: torch.FloatTensor, return_dict: bool = True, ):
        """Second-order correction using the average of the two derivatives."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample )

    def add_noise( self, original_samples, noise, timesteps ):
        """Not supported by this scheduler."""
        raise NotImplementedError()
| 715 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration module.
# NOTE(review): the next assignment reuses the same obfuscated name, so the
# logger binding is immediately overwritten by the URL map — confirm the
# intended distinct names upstream.
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
# Canonical XLM checkpoints mapped to their hosted config.json files.
SCREAMING_SNAKE_CASE__ = {
    '''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
    '''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
    '''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
    '''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
    '''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
    '''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
    '''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
    '''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
    '''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
    '''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}
class _UpperCamelCase(PretrainedConfig):
    """Configuration for XLM models.

    NOTE(review): the original ``__init__`` declared every parameter with the
    same mangled placeholder name (duplicate parameters are a SyntaxError).
    Parameter names are restored from the body's ``self.<name> = ...``
    assignments; the class-level ``model_type``/``attribute_map`` names follow
    the `PretrainedConfig` contract, and the base is the imported
    ``PretrainedConfig``.
    """

    model_type = '''xlm'''
    attribute_map = {
        '''hidden_size''': '''emb_dim''',
        '''num_attention_heads''': '''n_heads''',
        '''num_hidden_layers''': '''n_layers''',
        '''n_words''': '''vocab_size''',  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=3_0_1_4_5,
        emb_dim=2_0_4_8,
        n_layers=1_2,
        n_heads=1_6,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=5_1_2,
        embed_init_std=2_0_4_8**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        """Store the model hyperparameters and forward the special-token ids to the base class."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if "n_words" in kwargs:
            # Legacy alias routed through `attribute_map` above.
            self.n_words = kwargs['n_words']
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class _UpperCamelCase(OnnxConfig):
    """ONNX export configuration for XLM.

    NOTE(review): this class shares its obfuscated name with the config class
    above (which it therefore shadows at module level); the base is restored
    to the imported ``OnnxConfig``, and the property to the ``inputs`` name
    that the ONNX export machinery looks up (the original carried a mangled,
    name-mangled identifier that hid it from the API).
    """

    @property
    def inputs(self):
        """Dynamic-axis description of the exported model's input tensors."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ] )
| 577 | 0 |
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
_UpperCamelCase = logging.get_logger(__name__)
def a_ ( module, tensor_name, device, value=None, fpaa_statistics=None ) -> None:
    """Set a (possibly bnb-quantized) parameter or buffer on `module`, on `device`.

    NOTE(review): the original signature declared all five parameters with the
    same mangled name (a SyntaxError) and collapsed the 4-bit/8-bit flags into
    one local; names are restored from the body references.  ``Params4bit`` /
    ``Int8Params`` follow the bitsandbytes API (the adjacent
    ``hasattr(bnb.nn, 'Params4bit')`` string grounds the former).

    Args:
        module: root module; `tensor_name` may be a dotted path below it.
        tensor_name: name of the parameter/buffer to set.
        device: target device.
        value: new tensor value, or None to move the existing one.
        fpaa_statistics: optional fp16 statistics attached as `weight.SCB`.
    """
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split('.' )
        for split in splits[:-1]:
            new_module = getattr(module, split )
            if new_module is None:
                raise ValueError(F'{module} has no attribute {split}.' )
            module = new_module
        tensor_name = splits[-1]
    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(F'{module} does not have a parameter or a buffer named {tensor_name}.' )
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name )
    if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None:
        raise ValueError(F'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' )

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_4bit = False
        is_8bit = False
    else:
        is_4bit = hasattr(bnb.nn, 'Params4bit' ) and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit )
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params )

    if is_4bit or is_8bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device )
            elif isinstance(value, torch.Tensor ):
                new_value = value.to('cpu' )
                if value.dtype == torch.int8:
                    # int8 checkpoints are only loadable with recent bitsandbytes.
                    is_8bit_serializable = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse(
                        '0.37.2' )
                    if not is_8bit_serializable:
                        raise ValueError(
                            'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
                            'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' )
            else:
                new_value = torch.tensor(value, device='cpu' )
            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, ConvaD ) and fpaa_statistics is None:
                new_value = new_value.T
            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs ).to(device )
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs ).to(device )
            module._parameters[tensor_name] = new_value
            if fpaa_statistics is not None:
                setattr(module.weight, 'SCB', fpaa_statistics.to(device ) )
    else:
        if value is None:
            new_value = old_value.to(device )
        elif isinstance(value, torch.Tensor ):
            new_value = value.to(device )
        else:
            new_value = torch.tensor(value, device=device )
        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad )
            module._parameters[tensor_name] = new_value
def a_ ( model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False ) -> Dict:
    """Recursively replace `nn.Linear`/`Conv1D` children of `model` with bnb quantized layers.

    NOTE(review): the original signature declared all parameters with one
    mangled name (a SyntaxError); names are restored from the body references.
    The recursive call targets `_replace_with_bnb_linear`, a name this file no
    longer defines because the helper itself was renamed to a placeholder —
    confirm the intended private name upstream.

    Returns:
        (model, has_been_replaced) — the mutated model and whether anything changed.
    """
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if (isinstance(module, nn.Linear ) or isinstance(module, ConvaD )) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in '.'.join(current_key_name ) for key in modules_to_not_convert ):
                with init_empty_weights():
                    if isinstance(module, ConvaD ):
                        # Conv1D stores its weight transposed relative to nn.Linear.
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features
                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features, out_features, module.bias is not None, has_fp16_weights=quantization_config.llm_int8_has_fp16_weight, threshold=quantization_config.llm_int8_threshold, )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features, out_features, module.bias is not None, quantization_config.bnb_4bit_compute_dtype, compress_statistics=quantization_config.bnb_4bit_use_double_quant, quant_type=quantization_config.bnb_4bit_quant_type, )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module )
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False )
        if len(list(module.children() ) ) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module, modules_to_not_convert, current_key_name, quantization_config, has_been_replaced=has_been_replaced, )
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def a_ ( model, modules_to_not_convert=None, current_key_name=None, quantization_config=None ) -> Optional[Any]:
    """Public entry point: quantize `model`'s linear layers with bitsandbytes.

    NOTE(review): the original signature declared all parameters with one
    mangled name (a SyntaxError); names are restored from the body.  The call
    to `_replace_with_bnb_linear` refers to the helper above, whose own name
    was mangled to a placeholder — confirm the intended private name upstream.
    """
    # By default never convert the LM head (kept in full precision for stability).
    modules_to_not_convert = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config )
    if not has_been_replaced:
        logger.warning(
            'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
            ' Please double check your model architecture, or submit an issue on github if you think this is'
            ' a bug.' )
    return model
def a_ ( *args, **kwargs ) -> str:
    """Deprecated alias for `replace_with_bnb_linear`.

    The original declared `*args` and `**kwargs` with the same mangled name
    (a SyntaxError) and passed a mangled placeholder as the warning category;
    `warnings.warn`'s second argument must be a Warning class, restored to
    `FutureWarning` per the deprecation message.
    """
    warnings.warn(
        '`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead', FutureWarning, )
    return replace_with_bnb_linear(*args, **kwargs )
def a_ ( *args, **kwargs ) -> Tuple:
    """Deprecated alias for `set_module_quantized_tensor_to_device`.

    The original declared `*args` and `**kwargs` with the same mangled name
    (a SyntaxError) and passed a mangled placeholder as the warning category;
    restored to `FutureWarning` per the deprecation message.
    """
    warnings.warn(
        '`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead', FutureWarning, )
    return set_module_quantized_tensor_to_device(*args, **kwargs )
def a_ ( model ) -> list:
    """Return the module names that should stay un-quantized (tied weights + head).

    NOTE(review): the obfuscation had replaced several distinct locals with
    placeholders — in particular the final `append` pushed the *model*
    argument instead of the cleaned module name; locals are restored from the
    literal survivals in the body (`tied_model.tie_weights()`, the `.keys()`/
    `.values()` expressions, etc.).
    """
    tied_model = deepcopy(model )  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict ):
        tied_keys = sum(list(tied_params.values() ), [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params, [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = ['.weight', '.bias']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, '' )
        filtered_module_names.append(name )
    return filtered_module_names
| 459 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
# NOTE(review): the next assignment reuses the same obfuscated name, so the
# logger binding is overwritten by the URL map — yet the classes below call
# `logger.info(...)` / `logger.warning(...)`; confirm the intended distinct
# names upstream.
_snake_case = logging.get_logger(__name__)
# Canonical GIT checkpoint mapped to its hosted config.json file.
_snake_case = {
    """microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class GitVisionConfig(PretrainedConfig):
    """Vision-encoder configuration for GIT.

    NOTE(review): the original class name was a placeholder shared with the
    text config below (which therefore shadowed it); the name is restored
    from the ``GitVisionConfig(**...)`` call in that class.  The ``__init__``
    parameters all shared one mangled name (a SyntaxError) and are restored
    from the body's attribute assignments; the class-level ``model_type`` and
    the ``from_pretrained`` classmethod follow the `PretrainedConfig`
    contract, and the base is the imported ``PretrainedConfig``.
    """

    model_type = 'git_vision_model'

    def __init__(
        self,
        hidden_size=7_68,
        intermediate_size=30_72,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=2_24,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        """Store the vision-tower hyperparameters."""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs ):
        """Load this config from a checkpoint, unwrapping a nested GIT config if needed."""
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs )
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type" ) == "git":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict, **kwargs )
class lowerCAmelCase(PretrainedConfig):
    """Configuration for the GIT (GenerativeImage2Text) model.

    NOTE(review): the original ``__init__`` declared every parameter with one
    mangled name (a SyntaxError); parameter names are restored from the
    body's attribute assignments, the method names (``to_dict``) from the
    literal ``return output`` that survived obfuscation, and the base from
    the imported ``PretrainedConfig``.
    """

    model_type = 'git'

    def __init__(
        self,
        vision_config=None,
        vocab_size=3_05_22,
        hidden_size=7_68,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=10_24,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=1_01,
        eos_token_id=1_02,
        num_image_with_embedding=None,
        **kwargs,
    ):
        """Store the text-decoder hyperparameters and build the nested vision config."""
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values." )
        self.vision_config = GitVisionConfig(**vision_config )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self ):
        """Serialize, expanding the nested vision config and recording `model_type`."""
        output = copy.deepcopy(self.__dict__ )
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 655 | 0 |
import random
from .binary_exp_mod import bin_exp_mod
def __SCREAMING_SNAKE_CASE ( n, prec=1000 ) -> bool:
    """Miller-Rabin probabilistic primality test.

    The original signature declared both parameters with the same obfuscated
    name (a SyntaxError); they are restored from the body.  The halving now
    uses ``//=`` so the odd factor stays an exact int, and the modular
    exponentiation uses the built-in three-argument ``pow`` (equivalent to
    the previous ``bin_exp_mod`` helper, and it requires integer exponents).

    Args:
        n: integer to test.
        prec: number of random witness rounds (error probability <= 4**-prec).

    Returns:
        True if n is (probably) prime, False if definitely composite.
    """
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd: write n - 1 = d * 2**exp with d odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    count = 0
    while count < prec:
        witness = random.randint(2, n - 1 )
        b = pow(witness, d, n )
        if b != 1:
            composite = True
            for _ in range(exp ):
                if b == n - 1:
                    composite = False
                    break
                b = b * b
                b %= n
            if composite:
                return False
        count += 1
    return True
if __name__ == "__main__":
    # Read an upper bound from the user and list every (probable) prime up to it.
    lowerCAmelCase__: Optional[Any] = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    # The original referenced undefined names `n` and `is_prime_big`; use the
    # bound variable and the primality-test function this file actually defines.
    print(", ".join(str(i) for i in range(lowerCAmelCase__ + 1) if __SCREAMING_SNAKE_CASE(i)))
| 311 |
from jiwer import compute_measures
import datasets
lowerCAmelCase__: Union[str, Any] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
lowerCAmelCase__: str = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
lowerCAmelCase__: int = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class snake_case_(datasets.Metric):
    """Word Error Rate metric backed by ``jiwer.compute_measures``.

    Fixes applied: the two hook methods were both named ``__A`` (the second
    definition shadowed the first, so the metric schema was lost) — the
    ``datasets.Metric`` machinery dispatches to ``_info`` and ``_compute``
    by name — and ``_compute`` declared three parameters with the same
    identifier, which is a SyntaxError.
    """

    def _info(self):
        """Describe the metric's inputs and provenance for `datasets`."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            codebase_urls=['https://github.com/jitsi/jiwer/'],
            reference_urls=[
                'https://en.wikipedia.org/wiki/Word_error_rate',
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        """Return the corpus-level word error rate.

        Args:
            predictions: transcriptions to score.
            references: ground-truth transcriptions.
            concatenate_texts: score all texts as one concatenated pair
                instead of accumulating error counts pair by pair.
        """
        if concatenate_texts:
            # jiwer's signature is compute_measures(truth, hypothesis).
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
| 311 | 1 |
"""Lazy-import shim for the FocalNet model (standard transformers pattern):
heavy torch modules are only imported on first attribute access via
``_LazyModule``."""
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# The import map MUST be named `_import_structure`: it is passed to
# `_LazyModule` below. (The original bound it — and the torch-only list —
# to throwaway names, so the `_LazyModule(...)` call raised NameError.)
_import_structure = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is installed: also expose the modeling classes lazily.
    _import_structure['modeling_focalnet'] = [
        'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'FocalNetForImageClassification',
        'FocalNetForMaskedImageModeling',
        'FocalNetBackbone',
        'FocalNetModel',
        'FocalNetPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers — never executed at runtime.
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )
else:
    import sys

    # Replace this module with the lazy proxy so attributes import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 |
"""Lazy-import shim for the Bloom model family (standard transformers pattern)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# The import map MUST be named `_import_structure`: it is passed to
# `_LazyModule` below. (The original bound it — and both optional lists —
# to throwaway names, so the `_LazyModule(...)` call raised NameError.)
_import_structure = {
    'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # tokenizers is installed: expose the fast tokenizer lazily.
    _import_structure['tokenization_bloom_fast'] = ['BloomTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is installed: expose the modeling classes lazily.
    _import_structure['modeling_bloom'] = [
        'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BloomForCausalLM',
        'BloomModel',
        'BloomPreTrainedModel',
        'BloomForSequenceClassification',
        'BloomForTokenClassification',
        'BloomForQuestionAnswering',
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers — never executed at runtime.
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )
else:
    import sys

    # Replace this module with the lazy proxy so attributes import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 105 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Keep the logger and the archive map under DISTINCT names — the original
# bound both to the same identifier, so the logger was immediately clobbered
# by the dict.
logger = logging.get_logger(__name__)

# Canonical pretrained-config download locations for the XLM checkpoints.
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json',
    'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json',
    'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json',
    'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json',
    'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json',
    'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json',
    'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json',
    'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json',
    'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json',
    'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json',
}
class _UpperCamelCase(PretrainedConfig):
    """Configuration class for XLM models.

    Stores the model hyper-parameters and inherits serialization/loading
    behaviour from ``PretrainedConfig``.

    Fixes applied: the base class referenced an undefined name (the module
    imports ``PretrainedConfig`` for exactly this purpose); ``model_type``
    and ``attribute_map`` were both bound to one class attribute name (the
    second clobbered the first — ``PretrainedConfig`` reads both by name);
    every ``__init__`` parameter shared one identifier (a SyntaxError) while
    the body read the real hyper-parameter names; and the attribute
    assignments were bound to throwaway locals instead of ``self``.
    """

    model_type = 'xlm'
    attribute_map = {
        'hidden_size': 'emb_dim',
        'num_attention_heads': 'n_heads',
        'num_hidden_layers': 'n_layers',
        'n_words': 'vocab_size',  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=3_0_1_4_5,
        emb_dim=2_0_4_8,
        n_layers=1_2,
        n_heads=1_6,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=5_1_2,
        embed_init_std=2_0_4_8**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        """Initialize an XLM configuration (defaults match xlm-mlm-en-2048)."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        # Legacy alias: honour an explicit `n_words` override if provided.
        if "n_words" in kwargs:
            self.n_words = kwargs['n_words']

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class XLMOnnxConfig(OnnxConfig):
    """ONNX export configuration for XLM models.

    Fixes applied: the base class referenced an undefined name (the module
    imports ``OnnxConfig`` for exactly this purpose); the class re-used the
    config class's name, shadowing it at module level; the property was
    given a mangled dead name where the ONNX exporter reads the ``inputs``
    hook; and ``dynamic_axis`` was read without ever being assigned
    (NameError) because both branch assignments went to throwaway locals.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for each exported input tensor."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ]
        )
| 577 |
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the shared SentencePiece fixture model used by the tests below.
SCREAMING_SNAKE_CASE__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer test suite for XLNet (slow and fast tokenizers).

    Fixes applied: the base class referenced an undefined name (the module
    imports ``TokenizerTesterMixin`` for exactly this purpose); the four
    mixin hook attributes all shared one class-attribute name (the mixin
    reads ``tokenizer_class`` etc. by name); every method was named the same
    mangled identifier, so ``setUp`` never ran and no ``test_*`` method was
    ever discovered; and the fixture path was passed where boolean flags
    (``keep_accents``, ``do_lower_case``, ``add_special_tokens``) belong.
    """

    # Hooks read by TokenizerTesterMixin.
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing.
        tokenizer = XLNetTokenizer(SCREAMING_SNAKE_CASE__, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """`<s>` must round-trip with id 1."""
        token = '<s>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<unk>')
        self.assertEqual(vocab_keys[1], '<s>')
        self.assertEqual(vocab_keys[-1], '<eod>')
        self.assertEqual(len(vocab_keys), 1_0_0_6)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_0_0_0)

    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SCREAMING_SNAKE_CASE__, keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2])

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                'é',
                '.',
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # Out-of-vocabulary pieces ('9', 'é') come back as '<unk>'.
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '<unk>',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                '<unk>',
                '.',
            ],
        )

    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SCREAMING_SNAKE_CASE__, do_lower_case=True)
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + '',
                'i',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                'se',
                '.',
            ],
        )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['▁he', 'll', 'o'])

    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SCREAMING_SNAKE_CASE__, do_lower_case=False)
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                'se',
                '.',
            ],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')

        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_a = tokenizer.encode('multi-sequence build', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        # XLNet appends <sep>=4 and <cls>=3.
        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_a + [4, 3]

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name='xlnet-base-cased',
            revision='c841166438c31ec7ca9a106dee7bb312b73ae511',
        )
| 577 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__UpperCAmelCase = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['GPTNeoXTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 65 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
# Must be named `logger`: main() below logs through this name (the original
# bound it to a throwaway identifier, so every logger call raised NameError).
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we fine-tune from.

    Fixes applied: `main()` constructs `ModelArguments` and reads fields such
    as `model_name_or_path` and `task_type`, but the original class and all
    of its fields were bound to single obfuscated names (the class name was
    undefined at the call site, fields shadowed each other, and
    `default=` referenced an undefined identifier).
    """

    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    task_type: Optional[str] = field(
        default='NER', metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    use_fast: bool = field(default=False, metadata={'help': 'Set this flag to use fast tokenization.'})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'},
    )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to the data the model trains and evaluates on.

    Fixes applied as for `ModelArguments`: `main()` reads `data_dir`,
    `labels`, `max_seq_length` and `overwrite_cache`, but the original class
    and field names were collapsed to single obfuscated identifiers and
    `default=` referenced an undefined name.
    """

    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'})
    labels: Optional[str] = field(
        default=None,
        metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
def main():
    """Fine-tune / evaluate / predict a token-classification model.

    Parses `ModelArguments`, `DataTrainingArguments` and `TrainingArguments`
    from the command line (or a single JSON file) and runs the standard
    `Trainer` loop. Returns the dict of evaluation results.

    Fixes applied: the original unpacked the three argument objects into a
    single repeated local (losing two of them), referenced the resulting
    undefined names, passed an undefined placeholder into most calls, used
    non-existent kwargs/attributes (`idalabel`, `labelaid`, `fpaa` for
    `id2label`, `label2id`, `fp16`), and was named so the
    `if __name__ == "__main__": main()` guard could not find it.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Task implementations (NER, POS, ...) live in a sibling `tasks` module.
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions, label_ids) -> Tuple[List[int], List[int]]:
        """Map argmax logits back to label strings, skipping ignored (-100) positions."""
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list

    def compute_metrics(p) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        # NOTE(review): the module imports `fa_score` from seqeval; seqeval's
        # public name is `f1_score` — confirm the import line upstream.
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": fa_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info(" %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info(" %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results
def _mp_fn(index):
    """Entry point for `xla_spawn` (TPU multiprocessing); `index` is the process index.

    Renamed: the original re-used the training entry point's name, shadowing it
    so the `main()` call in the guard below could never resolve.
    """
    main()
if __name__ == "__main__":
    # (Removed trailing dataset residue that was fused onto this line and
    # made the file unparseable.)
    main()
'''simple docstring'''
from __future__ import annotations
# These three module-level names are read throughout the search classes below
# (`-> Path | None` annotations, `grid[pos_y][pos_x]`, `for action in delta`);
# the original bound all of them to the same throwaway identifier `a`.
Path = list[tuple[int, int]]  # a path is a list of (y, x) cells

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    """Grid cell wrapper for greedy best-first search.

    Fixes applied: the search class below constructs `Node(...)` so this
    class must carry that name; the original `__init__` declared six
    parameters with the same identifier (a SyntaxError) and bound every
    attribute to a throwaway local instead of `self`; and the heuristic
    method's name did not match the `self.calculate_heuristic()` call.
    """

    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)  # (row, col) — matches grid indexing
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """Manhattan distance from this cell to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        # Ordering by f_cost lets the open list be kept sorted with .sort().
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    """Greedy best-first search over the module-level ``grid``.

    Always expands the open node with the smallest heuristic (``Node.f_cost``);
    ``delta`` supplies the four axis-aligned moves.
    """

    def __init__(self, start, goal) -> None:
        # Node takes (pos_x, pos_y, goal_x, goal_y, ...) while start/goal are
        # (row, column) tuples, hence the index swap.
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: "list[Node]" = []
        self.reached = False

    def search(self) -> "Path | None":
        """Run the search and return the path as (row, col) tuples; when the
        target is unreachable, return just the start position."""
        while self.open_nodes:
            # Open nodes are sorted using __lt__ (smallest heuristic first).
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            for child_node in self.get_successors(current_node):
                # NOTE(review): Node defines no __eq__, so these membership
                # tests fall back to identity — preserved from the original.
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # Keep whichever of the two matching nodes has the best g_cost.
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent) -> "list[Node]":
        """Return the in-bounds, non-obstacle neighbours of ``parent``."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    # Goal coordinates passed in (goal_x, goal_y) order to match
                    # how the start node is built (was swapped).
                    self.target.pos_x,
                    self.target.pos_y,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node) -> "Path":
        """Follow parent links back to the start; return the path start→node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        # Mark the found path with 2's before re-printing the grid.
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
        for elem in grid:
            print(elem)
| 672 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Maps sub-module name -> public names it exports (consumed by _LazyModule).
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only exported when torch is installed.
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 672 | 1 |
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def gabor_filter_kernel(ksize, sigma, theta, lambd, gamma, psi) -> np.ndarray:
    """Build a ``ksize x ksize`` Gabor filter kernel.

    :param ksize: kernel size; bumped to the next odd number when even
    :param sigma: standard deviation of the Gaussian envelope
    :param theta: orientation of the filter in degrees
    :param lambd: wavelength of the sinusoidal factor
    :param gamma: spatial aspect ratio
    :param psi: phase offset in radians
    :return: the kernel as a 2-D float array
    """
    # prepare kernel: the kernel size has to be odd so it has a centre pixel
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # rotate the coordinate frame by theta
            _x = cos_theta * px + sin_theta * py
            _y = -sin_theta * px + cos_theta * py

            # fill kernel: Gaussian envelope modulated by a cosine carrier
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread('../image_data/lena.jpg')
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        # NOTE(review): `filteraD` comes from the file's `cva` import —
        # presumably OpenCV's filter2D; confirm against that module.
        kernel = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filteraD(gray, CV_8UC3, kernel)
    out = out / out.max() * 255
    out = out.astype(np.uint8)  # was np.uinta, which is not a NumPy attribute

    imshow('Original', gray)
    imshow('Gabor filter with 20x20 mask and 6 directions', out)
    waitKey(0)
| 148 |
import argparse
from collections import defaultdict
import yaml
# Path of the documentation table-of-contents YAML checked by this script.
# NOTE(review): binding name looks auto-mangled (upstream calls it PATH_TO_TOC).
lowerCAmelCase__ : Optional[int] ='''docs/source/en/_toctree.yml'''
def __lowercase ( a__ ) -> List[Any]:
__SCREAMING_SNAKE_CASE = defaultdict(a__ )
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'local': doc['local'], 'title': doc['title']} )
else:
new_doc_list.append(a__ )
__SCREAMING_SNAKE_CASE = new_doc_list
__SCREAMING_SNAKE_CASE = [key for key, value in counts.items() if value > 1]
__SCREAMING_SNAKE_CASE = []
for duplicate_key in duplicates:
__SCREAMING_SNAKE_CASE = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
if len(a__ ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
__SCREAMING_SNAKE_CASE = sorted(a__ , key=lambda a__ : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(a__ ) > 1:
raise ValueError('{doc_list} has two \'overview\' docs which is not allowed.' )
overview_doc.extend(a__ )
# Sort
return overview_doc
def check_scheduler_doc(overwrite=False):
    """Check (and with ``overwrite`` rewrite) the "Schedulers" API toc section."""
    # Kept local so the function is self-contained (mirrors the module path constant).
    path_to_toc = 'docs/source/en/_toctree.yml'
    with open(path_to_toc, encoding='utf-8') as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]['sections']

    new_scheduler_doc = clean_doc_toc(scheduler_doc)
    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]['sections'] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(path_to_toc, 'w', encoding='utf-8') as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.'
            )
def check_pipeline_doc(overwrite=False):
    """Check (and with ``overwrite`` rewrite) the "Pipelines" API toc section."""
    # Kept local so the function is self-contained (mirrors the module path constant).
    path_to_toc = 'docs/source/en/_toctree.yml'
    with open(path_to_toc, encoding='utf-8') as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]['sections']
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc['section']
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc['section'] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]['sections'] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(path_to_toc, 'w', encoding='utf-8') as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.'
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
| 148 | 1 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
# Smallest candidate considered when searching for a primitive root below.
# NOTE(review): binding name looks auto-mangled and the constant is not read
# by the visible code — presumably upstream's `min_primitive_root`.
lowerCAmelCase = 3
def primitive_root(p_val):
    """Return a (probable) primitive root modulo the prime ``p_val``.

    Samples g in [3, p_val) and rejects candidates for which
    g^2 ≡ 1 (mod p) or g^p ≡ 1 (mod p).
    """
    print('''Generating primitive root of p''')
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g
def generate_key(key_size):
    """Generate an ElGamal key pair.

    Returns ``(public_key, private_key)`` where
    ``public_key = (key_size, e_1, e_2, p)`` and ``private_key = (key_size, d)``.
    """
    print('''Generating prime p...''')
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files(name, key_size):
    """Write ``{name}_pubkey.txt`` / ``{name}_privkey.txt``; abort rather than overwrite."""
    if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'):
        print('\nWARNING:')
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            '''Use a different name or delete these files and re-run this program.'''
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f'\nWriting public key to file {name}_pubkey.txt...')
    with open(f'{name}_pubkey.txt', '''w''') as fo:
        fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}')
    print(f'Writing private key to file {name}_privkey.txt...')
    with open(f'{name}_privkey.txt', '''w''') as fo:
        fo.write(f'{private_key[0]},{private_key[1]}')
def main():
    """Generate the demo ElGamal key files."""
    print('''Making key files...''')
    make_key_files('''elgamal''', 2048)
    print('''Key files generation successful''')


if __name__ == "__main__":
    main()
| 717 |
def solution(n=100):
    """Project Euler #6: return (1+2+...+n)^2 - (1^2+2^2+...+n^2)."""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 429 | 0 |
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def get_flax_param(tax_checkpoint_path):
    '''Load a T5X checkpoint and return its parameters as one flat dict.'''
    flax_params = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    '''Rename flat T5X parameter keys to their HF Pix2Struct equivalents and
    convert the arrays to torch tensors (transposed, except embeddings).'''
    converted_dict = {}
    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pixastruct_original_pytorch_checkpoint_to_hf(tax_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False):
    '''Convert a T5X Pix2Struct checkpoint into a HF model + processor and save both.'''
    flax_params = get_flax_param(tax_checkpoint_path)

    if not use_large:
        encoder_config = PixaStructVisionConfig()
        decoder_config = PixaStructTextConfig()
    else:
        encoder_config = PixaStructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = PixaStructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)

    config = PixaStructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = PixaStructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = PixaStructImageProcessor()
    processor = PixaStructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        # NOTE(review): assignment targets reconstructed from context — the
        # original bound both values to throwaway names. Confirm upstream.
        image_processor.max_patches = 4096
        image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--use_large', action='store_true', help='Use large model.')
    parser.add_argument('--is_vqa', action='store_true', help='Use large model.')
    args = parser.parse_args()

    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
| 499 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single line padded/truncated to ``max_length``."""
    # BART tokenizers need add_prefix_space unless the line already starts with one.
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Drop columns that are entirely padding from ``input_ids`` (and the mask)."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class lowerCamelCase(Dataset):
    """Line-by-line seq2seq dataset reading ``{type_path}.source`` / ``.target``
    files from ``data_dir`` and tokenizing on the fly via ``encode_line``."""

    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + """.source""")
        self.tgt_file = Path(data_dir).joinpath(type_path + """.target""")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f'found empty line in {self.src_file}'
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f'empty source line for index {index}'
        assert tgt_line, f'empty tgt line for index {index}'

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, TaTokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right; RAG wraps two tokenizers.
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, """right""")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, """right""")

        source_ids = source_inputs["""input_ids"""].squeeze()
        target_ids = target_inputs["""input_ids"""].squeeze()
        src_mask = source_inputs["""attention_mask"""].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        """Character length of every line in ``data_file``."""
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        """Stack a list of examples and trim all-pad columns."""
        input_ids = torch.stack([x["""input_ids"""] for x in batch])
        masks = torch.stack([x["""attention_mask"""] for x in batch])
        target_ids = torch.stack([x["""decoder_input_ids"""] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            """input_ids""": source_ids,
            """attention_mask""": source_mask,
            """decoder_input_ids""": y,
        }
        return batch
__A : int = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    '''Flatten one level of nesting: [[a, b], [c]] -> [a, b, c].'''
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path: str) -> None:
    '''Dump the current git repo info to ``folder_path``/git_log.json.'''
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, """git_log.json"""))
def save_json(content, path, indent=4, **json_dump_kwargs):
    '''Serialize ``content`` as JSON to ``path``.'''
    with open(path, """w""") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def load_json(path):
    '''Read a JSON file and return the parsed object.'''
    with open(path) as f:
        return json.load(f)
def get_git_info():
    '''Return id/sha/branch/hostname of the enclosing git repository.'''
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        """repo_id""": str(repo),
        """repo_sha""": str(repo.head.object.hexsha),
        """repo_branch""": str(repo.active_branch),
        """hostname""": str(socket.gethostname()),
    }
    return repo_infos
def lmap(f: Callable, x: Iterable) -> List:
    '''``map`` that eagerly returns a list.'''
    return list(map(f, x))
def pickle_save(obj, path):
    '''Pickle ``obj`` to ``path``.'''
    with open(path, """wb""") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    '''Lower text and remove punctuation, articles and extra whitespace
    (standard SQuAD-style answer normalization).'''

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    '''Token-level F1 between the normalized prediction and ground truth;
    returns 0 when they share no tokens.'''
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def exact_match_score(prediction, ground_truth):
    '''True when prediction and ground truth match after normalization.'''
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> dict:
    '''Average exact-match score over paired output/reference lines.'''
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    '''True when the model-type string denotes a RAG model.'''
    return model_prefix.startswith("""rag""")
def set_extra_model_params(extra_params, hparams, config):
    '''Move the attributes named in ``extra_params`` from ``hparams`` onto
    ``config`` (falling back to an equivalent name, e.g. dropout -> dropout_rate
    for T5), deleting them from ``hparams``. Unknown params are logged and dropped.'''
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["""dropout"""] = """dropout_rate"""
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("""config doesn't have a `{}` attribute""".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 499 | 1 |
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    '''Merge the sorted runs input_list[low:mid] and input_list[mid:high+1]
    back into the list in place and return it.'''
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        # Pop the smaller head; ties take the left run to keep stability.
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list
def iter_merge_sort(input_list: list) -> list:
    '''Bottom-up (iterative) merge sort; returns a sorted copy of the input.'''
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging: run width doubles each pass
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(',')]
    print(iter_merge_sort(unsorted))
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    '''Build a row_size x row_size matrix counting up from 1; zero or negative
    sizes fall back to their absolute value (or the default 4).'''
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]
def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    '''Rotate the matrix 90 degrees counterclockwise.'''
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))
def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    '''Rotate the matrix 180 degrees.'''
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))
def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    '''Rotate the matrix 270 degrees counterclockwise (90 degrees clockwise).'''
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))
def transpose(matrix: list[list[int]]) -> list[list[int]]:
    '''Return the transpose of the matrix as a new list of rows.'''
    matrix = [list(x) for x in zip(*matrix)]
    return matrix
def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    '''Return the matrix with its rows in reverse order.'''
    matrix = matrix[::-1]
    return matrix
def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    '''Return the matrix with every row reversed (columns mirrored).'''
    matrix = [x[::-1] for x in matrix]
    return matrix
def print_matrix(matrix: list[list[int]]) -> None:
    '''Print the matrix one row per line.'''
    for i in matrix:
        print(*i)
if __name__ == "__main__":
    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 90 counterclockwise:\n')
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 180:\n')
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 270 counterclockwise:\n')
    print_matrix(rotate_270(matrix))
'''simple docstring'''
class PrefixSum:
    """Prefix-sum array supporting O(1) range-sum queries and a linear-time
    check for a contiguous subarray with a given sum."""

    def __init__(self, array) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        # prefix_sum[i] == array[0] + ... + array[i]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Sum of array[start..end] inclusive."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """True when some contiguous subarray sums to ``target_sum``."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
# Run any doctests embedded in this module when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
"""simple docstring"""
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """Output of a DDIM scheduler step.

    # prev_sample: sample for the previous timestep (next denoising-loop input)
    # pred_original_sample: optional predicted x_0, useful for preview/guidance
    """

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def _snake_case ( lowercase__ , lowercase__=0.9_9_9 , lowercase__="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowercase__ ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowercase__ ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
_lowerCamelCase : Optional[Any] = []
for i in range(lowercase__ ):
_lowerCamelCase : Optional[Any] = i / num_diffusion_timesteps
_lowerCamelCase : List[str] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowercase__ ) / alpha_bar_fn(lowercase__ ) , lowercase__ ) )
return torch.tensor(lowercase__ , dtype=torch.floataa )
class lowerCAmelCase__(SchedulerMixin, ConfigMixin):
    """Inverted DDIM scheduler: steps the DDIM update in the *forward*
    (noising) direction, as used for image inversion.

    NOTE(review): the original bases were a duplicated placeholder name;
    `SchedulerMixin` and `ConfigMixin` are the mixins imported at the top of
    this file and match the `@register_to_config` / `self.config` usage in
    the body. Three methods originally shared the name `A_` (so only the
    last survived); they are restored to the scheduler API names implied by
    their bodies.
    """

    # Solver order; kept under the original (obfuscated) attribute name for
    # backward compatibility with any external reads.
    lowerCamelCase__ = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps=1000,
        beta_start=0.0001,
        beta_end=0.02,
        beta_schedule="linear",
        trained_betas=None,
        clip_sample=True,
        set_alpha_to_zero=True,
        steps_offset=0,
        prediction_type="epsilon",
        clip_sample_range=1.0,
        **kwargs,
    ):
        """Build the beta/alpha schedules.

        The parameter names are restored from the body's reads and from
        `self.config.<name>` accesses in the other methods; the original
        signature repeated one placeholder name for every parameter.
        """
        # `set_alpha_to_one` was renamed; accept and translate the old kwarg.
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]

        # All schedule tensors must land on `self`: later lines read
        # self.betas / self.alphas / self.alphas_cumprod, which the original
        # never assigned. torch.floataa / np.intaa are not valid dtypes;
        # float32 / int64 are the intended ones.
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample, timestep=None):
        """No-op input scaling; present for interface parity with other
        schedulers (the body simply returns `sample` unchanged)."""
        return sample

    def set_timesteps(self, num_inference_steps, device=None):
        """Set the (ascending) discrete timesteps used for the diffusion chain.

        Args:
            num_inference_steps: number of steps; must not exceed the number
                of training timesteps.
            device: optional device to move the timestep tensor to.
        """
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output,
        timestep,
        sample,
        eta=0.0,
        use_clipped_model_output=False,
        variance_noise=None,
        return_dict=True,
    ):
        """Run one inverted-DDIM update from `timestep` to the next (noisier) one.

        Returns:
            A ``DDIMSchedulerOutput`` when `return_dict` is True, otherwise the
            tuple ``(prev_sample, pred_original_sample)``.
        """
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        # NOTE(review): `DDIMSchedulerOutput` is referenced (as in the
        # original) but not defined under that name in this file — the output
        # dataclass above carries an obfuscated name; confirm the binding.
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        """Length of the training discretization."""
        return self.config.num_train_timesteps
def _a ( SCREAMING_SNAKE_CASE__ : int ) -> int:
'''simple docstring'''
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and number_of_steps > 0
), f'''number_of_steps needs to be positive integer, your input {number_of_steps}'''
if number_of_steps == 1:
return 1
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : int = 1, 1
for _ in range(number_of_steps - 1 ):
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Union[str, Any] = current + previous, current
return current
if __name__ == "__main__":
    # Run this module's doctests when the file is executed as a script.
    import doctest
    doctest.testmod()
| 157 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class lowerCamelCase(ProcessorMixin):
    """Composite processor that routes image inputs to an auto image
    processor and text to an auto tokenizer, and can parse generated
    ``<s_key>...</s_key>`` token markup back into JSON-like structures
    (Donut-style).

    NOTE(review): the three class attributes and most method names were
    obfuscated duplicates; they are restored to the names `ProcessorMixin`
    requires and that the internal `self.tokenajson` recursion calls.
    """

    # ProcessorMixin machinery: components this processor wraps and the
    # classes used to (de)serialize them.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """Create the processor; `feature_extractor` is accepted as a
        deprecated alias for `image_processor`.

        Raises:
            ValueError: if no image processor or no tokenizer is provided.
        """
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        # `current_processor` is what bare calls dispatch to; flipped by
        # `as_target_processor` below.
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Process `images` with the image processor and/or `text` with the
        tokenizer; when both are given, the tokenized ids are attached to the
        image inputs as `labels`."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Deprecated: temporarily make plain calls tokenize label text."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def tokenajson(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a generated token string with `<s_key>...</s_key>` markup
        into a (possibly nested) dict / list structure.

        Args:
            tokens: generated string to parse.
            is_inner_value: True when parsing the content of an outer key
                (recursion flag; changes the empty-result shape).
            added_vocab: special-token vocabulary; looked up from the
                tokenizer when not provided.
        """
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}
        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                # Unterminated key: drop the opening tag and keep scanning.
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.tokenajson(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token):].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias for `image_processor`."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 157 | 1 |
'''simple docstring'''
def _UpperCamelCase ( __UpperCamelCase ) -> set:
lowerCamelCase_ = set()
# edges = list of graph's edges
lowerCamelCase_ = get_edges(__UpperCamelCase )
# While there are still elements in edges list, take an arbitrary edge
# (from_node, to_node) and add his extremity to chosen_vertices and then
# remove all arcs adjacent to the from_node and to_node
while edges:
lowerCamelCase_ ,lowerCamelCase_ = edges.pop()
chosen_vertices.add(__UpperCamelCase )
chosen_vertices.add(__UpperCamelCase )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(__UpperCamelCase )
return chosen_vertices
def get_edges(graph) -> set:
    """Return the set of directed ``(from_node, to_node)`` pairs of `graph`.

    The body iterates ``graph.items()`` while the original parameter carried
    a different placeholder name, and the sibling vertex-cover routine calls
    this helper as ``get_edges`` — both facts fix the names used here.

    Args:
        graph: adjacency mapping ``{node: [neighbours, ...]}``.
    """
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
    # Run this module's doctests when executed directly; the commented lines
    # below show a manual smoke test of the vertex-cover routine.
    import doctest

    doctest.testmod()

    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 42 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import table: maps submodule name -> public names it defines. The
# original bound this dict (and every later list) to one reused throwaway
# name, leaving `_import_structure` undefined at the `_LazyModule` call.
_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): the modeling submodule keys are reconstructed from the
    # configuration keys above — confirm they match the file names on disk.
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
    from .configuration_dataavec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DataaVecTextConfig,
        DataaVecTextOnnxConfig,
    )
    from .configuration_dataavec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DataaVecVisionConfig,
        DataaVecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dataavec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            DataaVecAudioForAudioFrameClassification,
            DataaVecAudioForCTC,
            DataaVecAudioForSequenceClassification,
            DataaVecAudioForXVector,
            DataaVecAudioModel,
            DataaVecAudioPreTrainedModel,
        )
        from .modeling_dataavec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DataaVecTextForCausalLM,
            DataaVecTextForMaskedLM,
            DataaVecTextForMultipleChoice,
            DataaVecTextForQuestionAnswering,
            DataaVecTextForSequenceClassification,
            DataaVecTextForTokenClassification,
            DataaVecTextModel,
            DataaVecTextPreTrainedModel,
        )
        from .modeling_dataavec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            DataaVecVisionForImageClassification,
            DataaVecVisionForMaskedImageModeling,
            DataaVecVisionForSemanticSegmentation,
            DataaVecVisionModel,
            DataaVecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_dataavec_vision import (
            TFDataaVecVisionForImageClassification,
            TFDataaVecVisionForSemanticSegmentation,
            TFDataaVecVisionModel,
            TFDataaVecVisionPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so the heavy submodules are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 102 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
@require_vision
class snake_case_(unittest.TestCase):
    """Processor tests for InstructBLIP (BLIP image processor + GPT-2
    tokenizer + Q-Former BERT tokenizer).

    NOTE(review): in the mangled original every value was bound to one
    throwaway local and all twelve methods shared a single obfuscated name;
    the names below are reconstructed from what the bodies read
    (`self.tmpdirname`, `processor`, `tokenizer`, ...) and from the standard
    unittest convention — confirm against the upstream test file.
    """

    def setUp(self):
        # Persist the temp dir on self: every helper below reads self.tmpdirname.
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        """Reload the saved processor and return its main tokenizer."""
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        """Reload the saved processor and return its image processor."""
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        """Reload the saved processor and return its Q-Former tokenizer."""
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random 30x400 RGB PIL image."""
        # np.uinta is not a NumPy attribute; uint8 is the valid image dtype.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        return [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
| 713 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path


# Make the repo's `src/` importable ahead of any installed transformers. The
# original bound the path to a reused throwaway name while the insert below
# read `git_repo_path`.
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))

import dataclasses  # noqa
import io  # noqa
import itertools  # noqa
import json  # noqa
import os  # noqa
import unittest  # noqa
from copy import deepcopy  # noqa

from parameterized import parameterized  # noqa

from transformers import TrainingArguments, is_torch_available  # noqa
from transformers.deepspeed import is_deepspeed_available  # noqa
from transformers.file_utils import WEIGHTS_NAME  # noqa
from transformers.testing_utils import (  # noqa
    CaptureLogger,
    ExtendSysPath,
    TestCasePlus,
    execute_subprocess_async,
    get_gpu_count,
    mockenv_context,
    require_deepspeed,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)
from transformers.trainer_utils import set_seed  # noqa


set_seed(42)

# tiny wav2vec2 checkpoints exercised by the parameterized tests below;
# `models` is the name the test class reads (`models[model]`).
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

# DeepSpeed ZeRO stages under test.
ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]
def _UpperCAmelCase(func, param_num, param):
    """Build a readable test name for `parameterized.expand`:
    ``<test_func>_<stage>_<model>``.

    The original declared three parameters with the same name (a
    SyntaxError) and stringified the wrong variable inside the join; the
    body's reads (`param.args`, `func.__name__`) fix both.
    """
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"
# Cartesian-product of zero stages with models to test; bound to `params`,
# the name the parameterized decorators on the test class consume.
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class snake_case_(TestCasePlus):
    """End-to-end DeepSpeed tests for the wav2vec2 `run_asr.py` example.

    NOTE(review): the base class, decorator arguments, and the
    fp16/distributed flags were obfuscated placeholders; they are
    reconstructed from the `@require_torch_multi_gpu` markers, the
    zero2/zero3 parameterization, and the `TestCasePlus` helpers the bodies
    call (`get_auto_remove_tmp_dir`, `examples_dir_str`, ...) — confirm
    against the upstream research-project test.
    """

    @parameterized.expand(params, name_func=_UpperCAmelCase)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=_UpperCAmelCase)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=_UpperCAmelCase)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=_UpperCAmelCase)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(self, stage, model, eval_steps=10, distributed=True, fp16=True, quality_checks=True):
        """Run one training configuration and sanity-check the output dir."""
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )
        self.do_checks(output_dir)
        return output_dir

    def run_trainer(self, stage, model_name, eval_steps=10, num_train_epochs=1, distributed=True, fp16=True):
        """Launch `run_asr.py` under deepspeed with the given config."""
        # NOTE(review): after=False keeps the dir for debugging — confirm.
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with mode gpus because we use very little data)
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
| 510 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def _lowerCAmelCase ( __snake_case : Union[str, Any] ) -> Optional[int]:
__A : str = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class SCREAMING_SNAKE_CASE (a__ , a__ , a__ , unittest.TestCase ):
    """Fast tests for `StableDiffusionLatentUpscalePipeline`.

    NOTE(review): this block was machine-mangled and is annotated rather
    than rewritten — every class attribute below is bound to the same name
    `lowerCAmelCase` (only the last assignment survives), local values are
    written to `__A` while later lines read the original names (`image`,
    `components`, `inputs`, ...), and the mixin bases are the placeholder
    `a__` (presumably the three pipeline tester mixins imported above).
    Confirm each restoration against the upstream test file before fixing.
    """

    # presumably: pipeline_class
    lowerCAmelCase = StableDiffusionLatentUpscalePipeline
    # presumably: params (text-guided image variation minus unsupported args)
    lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        '''height''',
        '''width''',
        '''cross_attention_kwargs''',
        '''negative_prompt_embeds''',
        '''prompt_embeds''',
    }
    # presumably: required_optional_params
    lowerCAmelCase = PipelineTesterMixin.required_optional_params - {'''num_images_per_prompt'''}
    # presumably: batch_params
    lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    lowerCAmelCase = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    lowerCAmelCase = frozenset([] )
    lowerCAmelCase = True

    @property
    def SCREAMING_SNAKE_CASE ( self):
        """Build a small random latent image.

        NOTE(review): the writes go to `__A` but the body reads
        `batch_size`/`num_channels`/`sizes`/`image`, and `.to(_UpperCAmelCase)`
        presumably targeted `torch_device` — confirm.
        """
        __A : Optional[int] = 1
        __A : str = 4
        __A : Dict = (16, 16)

        __A : List[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(_UpperCAmelCase)
        return image

    def SCREAMING_SNAKE_CASE ( self):
        """Assemble tiny pipeline components (UNet, VAE, Euler scheduler,
        CLIP text encoder + tokenizer) and return them as a dict; the final
        dict reads `model`/`vae`/`scheduler`/`text_encoder`/`tokenizer`,
        which the `__A` writes above it were meant to bind."""
        torch.manual_seed(0)
        __A : Dict = UNetaDConditionModel(
            act_fn='gelu' , attention_head_dim=8 , norm_num_groups=_UpperCAmelCase , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
                'KDownBlock2D',
                'KCrossAttnDownBlock2D',
                'KCrossAttnDownBlock2D',
                'KCrossAttnDownBlock2D',
            ) , in_channels=8 , mid_block_type=_UpperCAmelCase , only_cross_attention=_UpperCAmelCase , out_channels=5 , resnet_time_scale_shift='scale_shift' , time_embedding_type='fourier' , timestep_post_act='gelu' , up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D') , )
        __A : int = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
                'DownEncoderBlock2D',
                'DownEncoderBlock2D',
                'DownEncoderBlock2D',
                'DownEncoderBlock2D',
            ] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        __A : List[Any] = EulerDiscreteScheduler(prediction_type='sample')
        __A : Union[str, Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='quick_gelu' , projection_dim=512 , )
        __A : List[str] = CLIPTextModel(_UpperCAmelCase)
        __A : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        __A : List[str] = {
            'unet': model.eval(),
            'vae': vae.eval(),
            'scheduler': scheduler,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components

    def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase=0):
        """Build deterministic call kwargs for the pipeline; the dict reads
        `generator`, which the branch above was meant to assign."""
        if str(_UpperCAmelCase).startswith('mps'):
            # MPS does not support device-scoped generators.
            __A : Optional[Any] = torch.manual_seed(_UpperCAmelCase)
        else:
            __A : Tuple = torch.Generator(device=_UpperCAmelCase).manual_seed(_UpperCAmelCase)
        __A : List[str] = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': self.dummy_image.cpu(),
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    def SCREAMING_SNAKE_CASE ( self):
        """Smoke-test a full pipeline call on CPU and compare a 3x3 output
        slice against recorded values (tolerance 1e-3)."""
        __A : Optional[Any] = 'cpu'
        __A : str = self.get_dummy_components()
        __A : Optional[Any] = self.pipeline_class(**_UpperCAmelCase)
        pipe.to(_UpperCAmelCase)
        pipe.set_progress_bar_config(disable=_UpperCAmelCase)

        __A : Optional[Any] = self.get_dummy_inputs(_UpperCAmelCase)
        __A : List[str] = pipe(**_UpperCAmelCase).images
        __A : Union[str, Any] = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape , (1, 256, 256, 3))
        __A : Any = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055])
        __A : List[Any] = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(_UpperCAmelCase , 1e-3)

    # The overrides below only loosen the numeric tolerances of the shared
    # mixin tests; each calls the same-named super() implementation.
    def SCREAMING_SNAKE_CASE ( self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def SCREAMING_SNAKE_CASE ( self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def SCREAMING_SNAKE_CASE ( self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def SCREAMING_SNAKE_CASE ( self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def SCREAMING_SNAKE_CASE ( self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def SCREAMING_SNAKE_CASE ( self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def SCREAMING_SNAKE_CASE ( self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def SCREAMING_SNAKE_CASE ( self):
        """Run the pipeline under every supported Karras scheduler and check
        all outputs share one shape (`check_same_shape` helper above)."""
        __A : Tuple = [
            'DDIMScheduler',
            'DDPMScheduler',
            'PNDMScheduler',
            'HeunDiscreteScheduler',
            'EulerAncestralDiscreteScheduler',
            'KDPM2DiscreteScheduler',
            'KDPM2AncestralDiscreteScheduler',
            'DPMSolverSDEScheduler',
        ]
        __A : int = self.get_dummy_components()
        __A : Union[str, Any] = self.pipeline_class(**_UpperCAmelCase)
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=_UpperCAmelCase)
        pipe.to(_UpperCAmelCase)
        pipe.set_progress_bar_config(disable=_UpperCAmelCase)

        __A : int = self.get_dummy_inputs(_UpperCAmelCase)
        __A : Union[str, Any] = 2
        __A : List[str] = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue
            __A : List[str] = getattr(_UpperCAmelCase , scheduler_enum.name)
            __A : Optional[Any] = scheduler_cls.from_config(pipe.scheduler.config)
            __A : Any = pipe(**_UpperCAmelCase)[0]
            outputs.append(_UpperCAmelCase)
        assert check_same_shape(_UpperCAmelCase)
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
    """Slow GPU integration tests for the latent-upscaler pipeline (downloads
    pretrained weights and reference images from the Hub).

    Fixes: ``torch.floataa`` does not exist (half precision was intended);
    locals were assigned to a mangled name while later lines read
    ``pipe``/``upscaler``/``generator``/``image``/``expected_image``; the
    lifecycle/test methods were renamed so unittest actually runs them.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)
        pipe = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' , torch_dtype=torch.float16)
        pipe.to('cuda')
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.float16)
        upscaler.to('cuda')
        prompt = 'a photo of an astronaut high resolution, unreal engine, ultra realistic'
        low_res_latents = pipe(prompt , generator=generator , output_type='latent').images
        image = upscaler(
            prompt=prompt , image=low_res_latents , num_inference_steps=20 , guidance_scale=0 , generator=generator , output_type='np' , ).images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy')
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.float16)
        upscaler.to('cuda')
        prompt = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png')
        image = upscaler(
            prompt=prompt , image=init_image , num_inference_steps=20 , guidance_scale=0 , generator=generator , output_type='np' , ).images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy')
        assert np.abs((expected_image - image).max()) < 5e-2
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    """Builds a tiny ConvBERT config plus random inputs and checks every TF head.

    Fixes: the original ``__init__`` and ``create_and_check_*`` signatures
    repeated one mangled parameter name (a SyntaxError), the ``self.x = x``
    assignments were lost (all values went to a throwaway local), and the
    class/method names are restored to match the callers below
    (``TFConvBertModelTester(self)``, ``prepare_config_and_inputs`` etc.).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        # The body deliberately hard-codes its dimensions (the original source
        # assigned literals, not the ctor arguments, which exist only for
        # interface compatibility with the common test mixins).
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        """Create a tiny config and random input/label tensors for it."""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Run the base model with dict and list inputs; check hidden-state shape."""
        model = TFConvBertModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Check the MLM head emits vocab-sized logits per position."""
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Check the sequence-classification head emits one logit row per example."""
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Tile the inputs across choices and check the MC head's logits shape."""
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1) , (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1) , (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1) , (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Check the token-classification head emits per-token label logits."""
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Check the QA head emits per-token start and end logits."""
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Reduce the full fixture to the (config, inputs_dict) the mixins expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ):
    """Common test-suite wiring for the TF ConvBERT model family.

    NOTE(review): this block is machine-mangled and left byte-identical here —
    the base names ``a__`` are unresolved (presumably ``TFModelTesterMixin``
    and ``PipelineTesterMixin`` from the imports above — confirm), all class
    attributes share the name ``lowerCAmelCase`` so each assignment shadows
    the previous one, every method is named ``SCREAMING_SNAKE_CASE`` so only
    the last definition survives and unittest discovers none of them, and
    several locals are bound to ``__A`` while later lines read
    ``_UpperCAmelCase`` (unresolved at runtime).
    """

    # Presumably ``all_model_classes`` originally.
    lowerCAmelCase = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    # Presumably ``pipeline_model_mapping`` originally.
    lowerCAmelCase = (
        {
            '''feature-extraction''': TFConvBertModel,
            '''fill-mask''': TFConvBertForMaskedLM,
            '''question-answering''': TFConvBertForQuestionAnswering,
            '''text-classification''': TFConvBertForSequenceClassification,
            '''token-classification''': TFConvBertForTokenClassification,
            '''zero-shot''': TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # Three boolean test switches (identities mangled — confirm originals).
    lowerCAmelCase = False
    lowerCAmelCase = False
    lowerCAmelCase = False

    def SCREAMING_SNAKE_CASE ( self):
        """Build the shared model tester and config tester (``setUp`` originally?)."""
        __A : int = TFConvBertModelTester(self)
        # NOTE(review): ``_UpperCAmelCase`` is unresolved — presumably ConvBertConfig.
        __A : str = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37)

    def SCREAMING_SNAKE_CASE ( self):
        """Run the generic config sanity checks."""
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE ( self):
        """Exercise the base model shape check."""
        __A : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCAmelCase)

    def SCREAMING_SNAKE_CASE ( self):
        """Exercise the masked-LM head."""
        __A : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase)

    def SCREAMING_SNAKE_CASE ( self):
        """Exercise the multiple-choice head."""
        __A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase)

    def SCREAMING_SNAKE_CASE ( self):
        """Exercise the question-answering head."""
        __A : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase)

    def SCREAMING_SNAKE_CASE ( self):
        """Exercise the sequence-classification head."""
        __A : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase)

    def SCREAMING_SNAKE_CASE ( self):
        """Exercise the token-classification head."""
        __A : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase)

    @slow
    def SCREAMING_SNAKE_CASE ( self):
        """Save each model in SavedModel format, reload it with Keras, and check
        the exported hidden-states/attentions structures and shapes.

        NOTE(review): heavily mangled — many ``__A`` bindings are read back via
        other names; left untouched pending confirmation of the original.
        """
        __A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        __A : List[str] = True
        __A : List[str] = True
        if hasattr(_UpperCAmelCase , 'use_cache'):
            __A : List[Any] = True
        __A : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
        __A : Union[str, Any] = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
        for model_class in self.all_model_classes:
            __A : List[str] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)
            __A : Optional[int] = model_class(_UpperCAmelCase)
            __A : Optional[Any] = len(model(_UpperCAmelCase))
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase)
                __A : Union[str, Any] = os.path.join(_UpperCAmelCase , 'saved_model' , '1')
                __A : Tuple = tf.keras.models.load_model(_UpperCAmelCase)
                __A : str = model(_UpperCAmelCase)
                if self.is_encoder_decoder:
                    __A : Optional[int] = outputs['encoder_hidden_states']
                    __A : str = outputs['encoder_attentions']
                else:
                    __A : List[Any] = outputs['hidden_states']
                    __A : Optional[Any] = outputs['attentions']
                self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
                __A : str = getattr(
                    self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
                self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
                # ConvBERT halves the effective attention heads (head_ratio).
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

    @slow
    def SCREAMING_SNAKE_CASE ( self):
        """Smoke-test loading the public pretrained checkpoint."""
        __A : Dict = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        self.assertIsNotNone(_UpperCAmelCase)

    def SCREAMING_SNAKE_CASE ( self):
        """Check attention outputs are exposed, correctly shaped, and togglable
        via both call arguments and the config.

        NOTE(review): same mangling pattern as above; left byte-identical.
        """
        __A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        __A : Any = True
        __A : str = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length)
        __A : Any = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
        __A : int = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
        __A : Tuple = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)

        def check_decoder_attentions_output(_UpperCAmelCase):
            __A : List[str] = len(_UpperCAmelCase)
            self.assertEqual(out_len % 2 , 0)
            __A : Any = outputs.decoder_attentions
            self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )

        def check_encoder_attentions_output(_UpperCAmelCase):
            __A : str = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

        for model_class in self.all_model_classes:
            __A : Dict = True
            __A : Any = False
            __A : str = model_class(_UpperCAmelCase)
            __A : List[str] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
            __A : List[str] = len(_UpperCAmelCase)
            self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
            check_encoder_attentions_output(_UpperCAmelCase)
            if self.is_encoder_decoder:
                __A : Union[str, Any] = model_class(_UpperCAmelCase)
                __A : int = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
                self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
                check_decoder_attentions_output(_UpperCAmelCase)
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            __A : int = True
            __A : Tuple = model_class(_UpperCAmelCase)
            __A : Dict = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
            self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
            check_encoder_attentions_output(_UpperCAmelCase)
            # Check attention is always last and order is fine
            __A : Any = True
            __A : str = True
            __A : Union[str, Any] = model_class(_UpperCAmelCase)
            __A : Union[str, Any] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase))
            self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase)
            check_encoder_attentions_output(_UpperCAmelCase)
@require_tf
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
    """Slow integration test: runs the pretrained TF ConvBERT base model and
    compares a slice of the hidden states against stored reference values.

    Fixes: locals were assigned to a mangled name while later lines read
    ``output``/``expected_slice`` (NameErrors); the test method is renamed so
    unittest discovers it.
    """

    @slow
    def test_inference_no_head(self):
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        # Base hidden size of conv-bert-base is 768.
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4)
import requests

# Fix: the functions below read APPID and URL_BASE, but both constants were
# assigned to one mangled name. Restored.
APPID = ''  # <-- Put your OpenWeatherMap appid here!
URL_BASE = 'https://api.openweathermap.org/data/2.5/'
def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """Return current conditions for city *q* from OpenWeatherMap's /weather endpoint.

    ``locals()`` is forwarded as the query string, so the parameter names must be
    the API's query keys (``q``, ``appid``). Fixes the original signature, which
    reused one mangled name for both parameters (a SyntaxError), and restores
    the name the __main__ block calls.
    """
    return requests.get(URL_BASE + "weather" , params=locals() ).json()
def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """Return the forecast for city *q* from OpenWeatherMap's /forecast endpoint.

    ``locals()`` becomes the query string (keys ``q``, ``appid``). Fixes the
    duplicated mangled parameter name (a SyntaxError) and the colliding
    function name.
    """
    return requests.get(URL_BASE + "forecast" , params=locals() ).json()
def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """Return combined weather data for (*lat*, *lon*) from the /onecall endpoint.

    ``locals()`` becomes the query string (keys ``lat``, ``lon``, ``appid``).
    Fixes the duplicated mangled parameter name (a SyntaxError) and the
    colliding function name.
    """
    return requests.get(URL_BASE + "onecall" , params=locals() ).json()
if __name__ == "__main__":
    from pprint import pprint

    # Simple REPL: look up cities until the user submits an empty line.
    # Fix: input was bound to a mangled name while the body read ``location``;
    # also removed a stray data-artifact fragment that broke the syntax.
    while True:
        location = input('Enter a location:').strip()
        if location:
            pprint(current_weather(location))
        else:
            break
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowerCAmelCase__ ( UpperCamelCase ):
def _lowercase ( self : List[Any]):
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def _lowercase ( self : Tuple):
A__ : Any = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
return Dataset.from_dict(_A)
def _lowercase ( self : Union[str, Any]):
A__ : str = self._create_example_records()
A__ : Dict = Dataset.from_list(_A)
self.assertListEqual(dset.column_names , ["col_1", "col_2"])
for i, r in enumerate(_A):
self.assertDictEqual(_A , example_records[i])
def _lowercase ( self : List[str]):
A__ : List[str] = self._create_example_records()
A__ : Any = Dataset.from_list(_A)
A__ : Optional[int] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
self.assertEqual(dset.info , dset_from_dict.info)
def _lowercase ( self : Any): # checks what happens with missing columns
A__ : Dict = [{"col_1": 1}, {"col_2": "x"}]
A__ : Any = Dataset.from_list(_A)
self.assertDictEqual(dset[0] , {"col_1": 1})
self.assertDictEqual(dset[1] , {"col_1": None}) # NB: first record is used for columns
def _lowercase ( self : str): # checks if the type can be inferred from the second record
A__ : Dict = [{"col_1": []}, {"col_1": [1, 2]}]
A__ : Dict = Dataset.from_list(_A)
self.assertEqual(dset.info.features["col_1"] , Sequence(Value("int64")))
def _lowercase ( self : Union[str, Any]):
A__ : Dict = Dataset.from_list([])
self.assertEqual(len(_A) , 0)
self.assertListEqual(dset.column_names , []) | 182 | 1 |
import re

from filelock import FileLock


try:
    import nltk

    # Fix: the availability flag was assigned to a mangled name while the
    # guard below (and the sentence-split helper) read NLTK_AVAILABLE.
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    # Serialize the punkt download across processes sharing this directory.
    with FileLock('.lock') as lock:
        nltk.download('punkt', quiet=True)
def _lowerCamelCase( __snake_case ) -> str:
    """Re-segment the input so each sentence sits on its own line.

    Fix: ``re.sub`` returns a new string — the original discarded the result,
    so the Pegasus ``<n>`` newline markers were never actually removed.
    """
    text = re.sub("<n>" , "" , __snake_case )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(text) )
from __future__ import annotations


def fractional_knapsack(
    value: list[int], weight: list[float], capacity: float
) -> tuple[float, list[float]]:
    """Solve the fractional knapsack problem greedily.

    Items are taken in order of decreasing value/weight ratio; the first item
    that does not fit completely is taken fractionally and the loop stops.

    Fixes: the original signature reused one mangled name for all three
    parameters (a SyntaxError), the sort key read an undefined index, and the
    per-item fractions were assigned to a throwaway name instead of
    ``fractions[i]``. Also removed a stray data-artifact fragment fused into
    the import line. The function is renamed descriptively (no in-file callers).

    Args:
        value: profit of each item.
        weight: weight of each item (parallel to ``value``).
        capacity: total weight the knapsack can hold.

    Returns:
        ``(max_value, fractions)`` where ``fractions[i]`` is the fraction of
        item ``i`` that was packed.
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # Best value density first.
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    # Fix: removed a stray data-artifact line that broke the syntax.
    import doctest

    doctest.testmod()
"""simple docstring"""
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Return every knight move from *position* that stays on an n x n board.

    Results keep the fixed generation order of the eight candidate offsets.
    Fix: the def carried a mangled name shared by its siblings while the
    helper below calls ``get_valid_pos``; the referenced name is restored.
    """
    y, x = position
    candidates = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for candidate in candidates:
        y_test, x_test = candidate
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(candidate)
    return permissible_positions
def is_complete(board: list[list[int]]) -> bool:
    """Return True when every cell of *board* has been visited (no zeros left).

    Fix: restored the name the backtracking helper below calls.
    """
    return not any(elem == 0 for row in board for elem in row)
def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Backtracking step: try to extend a tour of length *curr* from *pos*.

    Mutates *board* in place; cells hold the 1-based visit order, 0 = unvisited.
    Fix: restored the name used by its recursive call site and by
    ``open_knight_tour``.
    """
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            # Undo the move before trying the next candidate (backtrack).
            board[y][x] = 0
    return False
def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board, trying every start square.

    Returns the board filled with 1-based visit numbers, or raises ValueError
    when no tour exists. Fixes: restored the mangled def name and the "Kight"
    typo in the error message.
    """
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    # Fix: removed a stray data-artifact line that broke the syntax.
    import doctest

    doctest.testmod()
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
    """Builds a tiny DeBERTa-v2 config plus random inputs and exercises each
    PyTorch head class.

    Fixes: the original repeated one mangled parameter name throughout every
    signature (a SyntaxError), bound constructor arguments to a throwaway
    local instead of ``self``, inherited from an undefined mangled base
    (dropped), and carried a class/method naming that collides with its
    siblings. Names are restored to match the callers below
    (``DebertaVaModelTester(self)``, ``create_and_check_deberta_model`` etc.).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create a tiny config and random input/label tensors for it."""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Build the DebertaVaConfig from the tester's dimensions."""
        return DebertaVaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )

    def check_loss_output(self, result):
        """Assert the loss is a scalar (empty size)."""
        self.parent.assertListEqual(list(result.loss.size()) , [])

    def create_and_check_deberta_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Run the base model under several input combinations; check shape."""
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids , token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()) , [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Check the MLM head emits vocab-sized logits per position."""
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Check the sequence-classification head's logits and scalar loss."""
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()) , [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Check the token-classification head emits per-token label logits."""
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Check the QA head emits per-token start and end logits."""
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Tile the inputs across choices and check the MC head's logits shape."""
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Reduce the full fixture to the (config, inputs_dict) the mixins expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    """Common test-suite wiring for the PyTorch DeBERTa-v2 model family.

    NOTE(review): this block is machine-mangled and left byte-identical here —
    the base names ``__UpperCAmelCase`` are unresolved (presumably
    ``ModelTesterMixin`` and ``PipelineTesterMixin`` from the imports above —
    confirm), all class attributes share the name ``lowercase__`` so each
    assignment shadows the previous one, and every method is named
    ``_UpperCAmelCase`` so only the last definition survives and unittest
    discovers none of them. Confirm the original identifiers before renaming.
    """

    # Presumably ``all_model_classes`` originally.
    lowercase__ = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    # Presumably ``pipeline_model_mapping`` originally.
    lowercase__ = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # Boolean test switches (identities mangled — confirm originals).
    lowercase__ = True
    lowercase__ = False
    lowercase__ = False
    lowercase__ = False
    lowercase__ = False

    def _UpperCAmelCase ( self : Optional[Any]):
        """Build the shared model tester and config tester (``setUp`` originally?)."""
        lowercase_ = DebertaVaModelTester(self)
        lowercase_ = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=3_7)

    def _UpperCAmelCase ( self : Union[str, Any]):
        """Run the generic config sanity checks."""
        self.config_tester.run_common_tests()

    def _UpperCAmelCase ( self : int):
        """Exercise the base model shape check."""
        lowercase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_)

    def _UpperCAmelCase ( self : Union[str, Any]):
        """Exercise the sequence-classification head."""
        lowercase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_)

    def _UpperCAmelCase ( self : Dict):
        """Exercise the masked-LM head."""
        lowercase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_)

    def _UpperCAmelCase ( self : List[Any]):
        """Exercise the question-answering head."""
        lowercase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_)

    def _UpperCAmelCase ( self : str):
        """Exercise the token-classification head."""
        lowercase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_)

    def _UpperCAmelCase ( self : Union[str, Any]):
        """Exercise the multiple-choice head."""
        lowercase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCAmelCase_)

    @slow
    def _UpperCAmelCase ( self : Tuple):
        """Smoke-test loading the first public pretrained checkpoint."""
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase_ = DebertaVaModel.from_pretrained(lowerCAmelCase_)
            self.assertIsNotNone(lowerCAmelCase_)
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Integration tests against the released DeBERTa-v2 XLarge checkpoint.

    NOTE(review): the original shadowed every local under one name and then
    called ``model(...)`` on undefined identifiers; locals restored from the
    structure of the test.
    """

    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        """Placeholder until the MLM-head checkpoint is published."""
        pass

    @slow
    def test_inference_no_head(self):
        """Run the pretrained checkpoint on a fixed input and compare a 3x3 logits slice."""
        model = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 100 | 0 |
"""Package init for the text-to-video pipelines: output dataclass plus guarded pipeline imports."""
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class _a (BaseOutput):
    """Output container for the text-to-video pipelines.

    NOTE(review): the original base class was an undefined name; ``BaseOutput``
    is the only imported candidate and the conventional base for pipeline
    outputs — confirm against the diffusers source.
    """

    # NOTE(review): placeholder value kept from the original; presumably this
    # was a type-annotated ``frames`` field — confirm against diffusers.
    SCREAMING_SNAKE_CASE = 42


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to dummy objects so importing this module never hard-fails.
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 591 |
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
    """Builds a tiny DebertaV2 config plus random inputs and checks every task head.

    NOTE(review): the original had an undefined base class, duplicated one
    identifier for every constructor/checker parameter (a SyntaxError), bound
    every method to the same name, and collapsed ``config.num_labels = ...``
    assignments; names were restored from the call sites in the accompanying
    test class (``DebertaVaModelTester(self)``, ``create_and_check_deberta_*``).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels plus a config, returned as one flat tuple."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Return a small DebertaVaConfig mirroring the tester attributes."""
        return DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def check_loss_output(self, result):
        """The loss must be a scalar (empty shape)."""
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Run the bare model with and without masks and check the hidden-state shape."""
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(
            list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size]
        )

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """MLM head: logits over the full vocabulary for every position."""
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Sequence-classification head: one logit per label, plus a scalar loss."""
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Token-classification head: per-position label logits."""
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """QA head: start/end logits per position."""
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Multiple-choice head: inputs tiled along a choices axis, one logit per choice."""
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Repackage prepare_config_and_inputs() as (config, inputs_dict) for the common tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class _a (ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suite wiring for DeBERTa-v2 models.

    NOTE(review): the original listed an undefined name twice as base class,
    reused one identifier for every attribute/method, and stored the testers in
    throwaway locals while the tests read ``self.model_tester``; restored to
    the conventional HF test layout so unittest discovers the tests.
    """

    # Model classes exercised by the common tests.
    all_model_classes = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline task name -> model class mapping for the pipeline tests.
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        """Create the model and config testers used by every test below."""
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class _a (unittest.TestCase):
    """Integration tests against the released DeBERTa-v2 XLarge checkpoint.

    NOTE(review): the original shadowed every local under one name and then
    called ``model(...)`` on undefined identifiers; locals restored from the
    structure of the test.
    """

    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        """Placeholder until the MLM-head checkpoint is published."""
        pass

    @slow
    def test_inference_no_head(self):
        """Run the pretrained checkpoint on a fixed input and compare a 3x3 logits slice."""
        model = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 591 | 1 |
"""Lazy package init for GPT-BigCode: real imports only under TYPE_CHECKING, _LazyModule otherwise."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


# Maps submodule name -> public names; consumed lazily by _LazyModule below.
# NOTE(review): the original assigned the dict, the modeling list, and the
# _LazyModule instance to throwaway names while _LazyModule was handed an
# undefined ``_import_structure``; restored to the standard lazy-init layout.
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 9 |
'''simple docstring'''
UpperCamelCase : Tuple = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]
def A__ ( __lowerCAmelCase : int ):
lowerCamelCase__ = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
UpperCamelCase : list[bool | None] = [None] * 10_00_00_00
UpperCamelCase : Tuple = True
UpperCamelCase : Optional[int] = False
def A__ ( __lowerCAmelCase : int ):
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
lowerCamelCase__ = chain(next_number(__lowerCAmelCase ) )
lowerCamelCase__ = number_chain
while number < 1000_0000:
lowerCamelCase__ = number_chain
number *= 10
return number_chain
def A__ ( __lowerCAmelCase : int = 1000_0000 ):
for i in range(1 , __lowerCAmelCase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution() = }')
| 9 | 1 |
'''simple docstring'''
from __future__ import annotations
lowerCAmelCase__ = 1.6_021e-19 # units = C
def _A ( A__ , A__ , A__ , ):
"""simple docstring"""
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 41 | """simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
SCREAMING_SNAKE_CASE__:Optional[Any] = logging.get_logger(__name__)
class snake_case__(ModelMixin):
    """Wrapper that treats several ControlNetModel instances as one model.

    ``forward`` runs every sub-controlnet and sums their residuals;
    ``save_pretrained``/``from_pretrained`` persist them as sibling
    directories suffixed ``_1``, ``_2``, ...

    NOTE(review): the original base class was an undefined name (``ModelMixin``
    is imported and otherwise unused), the method signatures repeated one
    identifier per parameter (a SyntaxError), and tuple/loop variables were
    collapsed onto single names; restored from the surviving references
    (``down_samples``/``mid_sample``, ``model_path_to_save`` etc.) and the
    conventional ``forward``/``save_pretrained``/``from_pretrained`` interface.
    """

    def __init__(self, controlnets):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample,
        timestep,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale,
        class_labels=None,
        timestep_cond=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        guess_mode=False,
        return_dict=True,
    ):
        """Run each controlnet on its (image, scale) pair and sum the residuals."""
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory,
        is_main_process=True,
        save_function=None,
        safe_serialization=False,
        variant=None,
    ):
        """Save each sub-controlnet under save_directory, save_directory_1, ..."""
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + F"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path, **kwargs):
        """Load controlnets from dir, dir_1, dir_2, ... until a directory is missing."""
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + F"_{idx}"

        logger.info(F"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                F"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
| 528 | 0 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    """Builds a tiny MegatronBert config plus random inputs and checks every task head.

    NOTE(review): the original bound every method to one obfuscated name and
    repeated a single identifier for all parameters (a SyntaxError); the class
    and method names were restored from the call sites in the accompanying
    test class (``MegatronBertModelTester(self)``, ``create_and_check_megatron_bert_*``).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels plus a config, returned as one flat tuple."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Return a small MegatronBertConfig mirroring the tester attributes."""
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,  # NOTE(review): was an undefined name; encoder-only tester
            initializer_range=self.initializer_range,
        )

    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Run the bare model with and without masks and check the output shapes."""
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """MLM head: logits over the full vocabulary for every position."""
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Causal-LM head: same logits shape as the MLM head."""
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """NSP head: one binary (is-next / not-next) logit pair per example."""
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Pretraining heads: MLM logits plus NSP logits."""
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(
            result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
        )
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """QA head: start/end logits per position."""
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Sequence-classification head: one logit per label."""
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Token-classification head: per-position label logits."""
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Multiple-choice head: inputs tiled along a choices axis, one logit per choice."""
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Repackage prepare_config_and_inputs() as (config, inputs_dict) for the common tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __magic_name__ ( UpperCAmelCase_, UpperCAmelCase_, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ : List[str] = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Any = True
# test_resize_embeddings = False
SCREAMING_SNAKE_CASE_ : List[str] = False
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase=False ) -> Dict:
lowercase_ : Any = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
if return_labels:
if model_class in get_values(_lowercase ):
lowercase_ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_lowercase )
lowercase_ : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowercase )
return inputs_dict
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : Optional[Any] = MegatronBertModelTester(self )
lowercase_ : Optional[int] = ConfigTester(self , config_class=_lowercase , hidden_size=37 )
def lowerCamelCase__ ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*_lowercase )
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*_lowercase )
def lowerCamelCase__ ( self ) -> Tuple:
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*_lowercase )
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*_lowercase )
def lowerCamelCase__ ( self ) -> Tuple:
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*_lowercase )
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*_lowercase )
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*_lowercase )
def lowerCamelCase__ ( self ) -> List[Any]:
    """Prepare fresh config/inputs and run the token-classification head check."""
    prepared = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_megatron_bert_for_token_classification(*prepared )
def _UpperCAmelCase ( a : List[Any] ) -> List[Any]:
"""simple docstring"""
return torch.tensor(
a , dtype=torch.long , device=a , )
A: Any = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase ):
    """Integration test for the 345M Megatron-BERT checkpoint."""

    @slow
    @unittest.skip('Model is not available.' )
    def lowerCamelCase__ ( self ) -> Tuple:
        """Run the fp16 model on a short sequence and compare a 3x3 corner of
        the first hidden state against reference activations.

        Fixes: every local was clobbered into one obfuscated name and several
        references (checkpoint dir, tolerances, tensor helper) pointed at
        undefined names; they are restored below.
        """
        checkpoint = 'nvidia/megatron-bert-uncased-345m'
        if "MYDIR" in os.environ:
            # Allow pointing at a locally downloaded copy of the checkpoint.
            checkpoint = os.path.join(os.environ['MYDIR'] , checkpoint )
        model = MegatronBertModel.from_pretrained(checkpoint )
        model.to(torch_device )  # NOTE(review): torch_device presumably comes from transformers.testing_utils — confirm import
        model.half()
        input_ids = _UpperCAmelCase([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 1024) )
        self.assertEqual(output.shape , expected_shape )
        expected = [-0.60_40, -0.25_17, -0.10_25, 0.34_20, -0.67_58, -0.00_17, -0.10_89, -0.19_90, 0.57_28]
        for ii in range(3 ):
            for jj in range(3 ):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = 'ii={} jj={} a={} b={}'.format(ii , jj , a , b )
                # Element-wise fp16 comparison within the module-level tolerance ``A``.
                self.assertTrue(math.isclose(a , b , rel_tol=A , abs_tol=A ) , msg=msg )
| 721 |
'''simple docstring'''
import argparse
A: List[Any] = "docs/source/_static/js/custom.js"
def _UpperCAmelCase ( a : Optional[Any] ) -> Optional[Any]:
    """Update the docs version switcher (``custom.js``) for release *a*.

    *a* is the release version string (e.g. ``"4.28.0"``) — see the
    ``--version`` CLI argument below.

    Fixes: the previous version opened the version string itself as a file,
    referenced an undefined ``version`` name in the f-strings, and wrote the
    version string back instead of the edited lines.
    """
    with open(A , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith('const stableVersion =' ):
        index += 1
    lines[index] = f"const stableVersion = \"v{a}\"\n"
    # Then update the dictionary
    while not lines[index].startswith('const versionMapping = {' ):
        index += 1
    # We go until the end of the mapping object
    while not lines[index].startswith('}' ):
        index += 1
    # We add the new version at the end
    lines[index - 1] += f"    \"v{a}\": \"v{a}\",\n"
    with open(A , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.writelines(lines )
if __name__ == "__main__":
    # Use distinct local names: the original reassigned the module constant
    # ``A`` (the JS file path) to the parser, breaking the updater.
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    # ``update_custom_js`` does not exist in this module; the implementation
    # survives (obfuscated) as ``_UpperCAmelCase`` above.
    _UpperCAmelCase(args.version)
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
# Delimiter inserted between sentences when flattening them to characters.
# NOTE(review): obfuscation collapsed several module constants into
# ``__lowerCamelCase``; the code below still refers to ``SENTENCE_DELIMITER``
# and ``SentencesToListOfCharacters`` — confirm/restore the original names.
__lowerCamelCase = """"""
if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):
    # Old jiwer (<2.3.0) has no char-level reduction transforms, so emulate
    # them with a custom "sentences -> list of characters" transform.
    class _snake_case ( tr.AbstractTransform ):
        """Flatten a list of sentences into one list of characters, optionally
        inserting a delimiter character between consecutive sentences."""

        def __init__( self , a = " " ) -> str:
            # ``a`` is the sentence delimiter.
            # NOTE(review): assigns from ``sentence_delimiter``, which is not
            # defined here — presumably should be the ``a`` parameter; confirm.
            _A = sentence_delimiter

        def lowercase_ ( self , a ) -> int:
            """Split one sentence string into its characters."""
            return list(a )

        def lowercase_ ( self , a ) -> Optional[Any]:
            """Flatten a list of sentences into one character list.

            NOTE(review): redefines ``lowercase_`` and therefore shadows the
            single-string variant above (obfuscation artifact); the locals
            below also reference ``chars``/``process_string`` while binding to
            ``_A`` — the names diverged during obfuscation.
            """
            _A = []
            for sent_idx, sentence in enumerate(a ):
                chars.extend(self.process_string(a ) )
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(a ) - 1:
                    chars.append(self.sentence_delimiter )
            return chars

    __lowerCamelCase = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    # Recent jiwer: build the char-level CER transform from builtin transforms.
    __lowerCamelCase = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
# BibTeX citation for the CER/WER evaluation-measures paper.
# NOTE(review): obfuscation reuses ``__lowerCamelCase`` for every constant in
# this module (transform, citation, description, kwargs) so each assignment
# clobbers the previous one; the intended names are presumably ``_CITATION``,
# ``_DESCRIPTION`` and ``_KWARGS_DESCRIPTION`` (used by the Metric below).
__lowerCamelCase = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
# Long-form description of the metric (rendered in the datasets hub UI).
__lowerCamelCase = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""
# Usage/arguments documentation shown by ``datasets.load_metric("cer")``.
__lowerCamelCase = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
    """Character Error Rate (CER) metric computed with ``jiwer``."""

    def lowercase_ ( self ):
        """Describe the metric: features, citation and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ) ,
            codebase_urls=['''https://github.com/jitsi/jiwer/'''] ,
            reference_urls=[
                '''https://en.wikipedia.org/wiki/Word_error_rate''',
                '''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
            ] , )

    def lowercase_ ( self , predictions , references , concatenate_texts=False ):
        """Compute the character error rate of *predictions* vs *references*.

        Fixes: the previous signature declared three parameters all named
        ``a`` (a SyntaxError) and passed the inputs back in as transforms;
        parameters are restored and jiwer is called with (truth, hypothesis).

        NOTE(review): the char-level transform built at module scope was
        clobbered by constant renaming — re-thread it via ``truth_transform``/
        ``hypothesis_transform`` once the constant names are restored.
        NOTE(review): this method shares the obfuscated name ``lowercase_``
        with the info method above and therefore shadows it.
        """
        if concatenate_texts:
            return jiwer.compute_measures(references , predictions )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions , references ):
            measures = jiwer.compute_measures(reference , prediction )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
from __future__ import annotations
import math
def UpperCAmelCase__ ( depth , node_index , is_max , scores , height ) -> int:
    """Return the optimal value of a perfect binary game tree via minimax.

    *depth*/*node_index* locate the current node (root is 0, 0); *is_max* is
    True when the current player maximises; *scores* holds the leaf values
    (length a power of two); *height* is the tree depth, log2(len(scores)).

    Fixes: the previous signature declared all five parameters with the same
    obfuscated name (a SyntaxError) and recursed on an undefined ``minimax``.
    """
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''' )
    if not scores:
        raise ValueError('''Scores cannot be empty''' )
    if depth == height:
        return scores[node_index]
    # Children alternate between minimising and maximising players.
    left = UpperCAmelCase__(depth + 1 , node_index * 2 , not is_max , scores , height )
    right = UpperCAmelCase__(depth + 1 , node_index * 2 + 1 , not is_max , scores , height )
    return max(left , right ) if is_max else min(left , right )
def UpperCAmelCase__ ( ) -> None:
    """Print the optimal minimax value for a sample perfect game tree.

    NOTE(review): obfuscation gave the recursive helper and this entry point
    the same name, so the helper is shadowed at call time; the reduction is
    therefore performed inline, bottom-up, producing the same printed result.
    """
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores ) , 2 )
    # Bottom-up minimax: the deepest internal level has depth height-1, and a
    # level maximises when its depth is even (the root, depth 0, maximises).
    level = list(scores )
    depth = int(height ) - 1
    while len(level ) > 1:
        reduce_op = max if depth % 2 == 0 else min
        level = [reduce_op(level[i] , level[i + 1] ) for i in range(0 , len(level ) , 2 )]
        depth -= 1
    print(F'''Optimal value : {level[0]}''' )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # ``main`` does not exist in this module after obfuscation; the entry
    # point survives as ``UpperCAmelCase__`` (the last definition above).
    UpperCAmelCase__()
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
# Module-level singletons and flags.
# NOTE(review): obfuscation collapsed six distinct constants into ``_A`` —
# each assignment clobbers the previous one.  The intended names are
# presumably: logger, MODEL_CARD_TEMPLATE_PATH, SESSION_ID, HF_HUB_OFFLINE,
# DISABLE_TELEMETRY and the telemetry endpoint URL; later code refers to
# several of them by those names.
_A : int = get_logger(__name__)
_A : List[Any] = Path(__file__).parent / 'model_card_template.md'
_A : Tuple = uuida().hex
_A : int = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
_A : List[Any] = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
_A : Optional[Any] = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def _a ( UpperCAmelCase = None ) -> str:
    """Format the user-agent string sent with Hub requests.

    *UpperCAmelCase* is an optional extra fragment: a dict of key/value pairs
    or a plain string appended verbatim.  Framework details are omitted when
    telemetry is disabled or the Hub is offline.

    Fixes: the original accumulated into two different variable names and
    called ``isinstance`` with the value as its own type argument.
    """
    # NOTE(review): SESSION_ID / DISABLE_TELEMETRY / HF_HUB_OFFLINE are the
    # intended module constants; their definitions were renamed to ``_A``.
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get('''DIFFUSERS_IS_CI''' , '''''' ).upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(UpperCAmelCase , dict ):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in UpperCAmelCase.items() )
    elif isinstance(UpperCAmelCase , str ):
        ua += "; " + UpperCAmelCase
    return ua
def _a ( model_id , organization = None , token = None ):
    """Return the fully-qualified repo name ``owner/model_id``.

    When *organization* is None the owner is resolved with ``whoami`` from
    *token* (falling back to the locally stored Hugging Face token).

    Fixes: the previous signature declared all three parameters with the same
    obfuscated name (a SyntaxError); the ``-> Tuple`` annotation referenced a
    name not imported in this module and was dropped.
    """
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token )['''name''']
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def _a ( args , model_name ) -> Optional[int]:
    """Create and save a model card (README.md) for a training run.

    *args* is the training namespace; only attributes it actually has are
    recorded.  Requires Jinja2, and only the main process (``local_rank`` in
    {-1, 0}) writes the card.

    Fixes: the previous signature declared two parameters with the same
    obfuscated name (a SyntaxError) and the template call repeated the
    ``adam_betaa`` keyword (another SyntaxError); attribute names restored.
    """
    if not is_jinja_available():
        raise ValueError(
            '''Modelcard rendering is based on Jinja templates.'''
            ''' Please make sure to have `jinja` installed before using `create_model_card`.'''
            ''' To install it, please run `pip install Jinja2`.''' )
    if hasattr(args , '''local_rank''' ) and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args , '''hub_token''' ) else None
    # NOTE(review): ``get_full_repo_name`` is the intended helper above (its
    # definition was obfuscated to ``_a``); restore the name at module level.
    repo_name = get_full_repo_name(model_name , token=hub_token )
    model_card = ModelCard.from_template(
        # Card metadata object that will be converted to a YAML block.
        card_data=ModelCardData(
            language='''en''' ,
            license='''apache-2.0''' ,
            library_name='''diffusers''' ,
            tags=[] ,
            datasets=args.dataset_name ,
            metrics=[] , ) ,
        template_path=MODEL_CARD_TEMPLATE_PATH ,  # NOTE(review): constant was clobbered to ``_A``; restore its name
        model_name=model_name ,
        repo_name=repo_name ,
        dataset_name=args.dataset_name if hasattr(args , '''dataset_name''' ) else None ,
        learning_rate=args.learning_rate ,
        train_batch_size=args.train_batch_size ,
        eval_batch_size=args.eval_batch_size ,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args , '''gradient_accumulation_steps''' ) else None
        ) ,
        adam_beta1=args.adam_beta1 if hasattr(args , '''adam_beta1''' ) else None ,
        adam_beta2=args.adam_beta2 if hasattr(args , '''adam_beta2''' ) else None ,
        adam_weight_decay=args.adam_weight_decay if hasattr(args , '''adam_weight_decay''' ) else None ,
        adam_epsilon=args.adam_epsilon if hasattr(args , '''adam_epsilon''' ) else None ,
        lr_scheduler=args.lr_scheduler if hasattr(args , '''lr_scheduler''' ) else None ,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args , '''lr_warmup_steps''' ) else None ,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args , '''ema_inv_gamma''' ) else None ,
        ema_power=args.ema_power if hasattr(args , '''ema_power''' ) else None ,
        ema_max_decay=args.ema_max_decay if hasattr(args , '''ema_max_decay''' ) else None ,
        mixed_precision=args.mixed_precision , )
    card_path = os.path.join(args.output_dir , '''README.md''' )
    model_card.save(card_path )
def _a ( resolved_file , commit_hash = None ) -> str:
    """Extract the commit hash from a resolved cache file path.

    Returns *commit_hash* unchanged when given; otherwise parses the
    ``snapshots/<hash>/`` component of *resolved_file* and returns None when
    the path is not from a snapshot layout or the hash is malformed.

    Fixes: the previous signature declared both parameters with the same
    obfuscated name (a SyntaxError).
    """
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file ).as_posix() )
    search = re.search(R'''snapshots/([^/]+)/''' , resolved_file )
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
# NOTE(review): intended names are presumably ``hf_cache_home`` and
# ``old_diffusers_cache``; both were renamed to ``_A``, so the second line's
# ``hf_cache_home`` reference no longer resolves — restore the names.
_A : Optional[Any] = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
_A : Any = os.path.join(hf_cache_home, 'diffusers')
def _a ( old_cache_dir = None , new_cache_dir = None ) -> None:
    """Migrate the legacy diffusers cache layout into the current location.

    Blob files are moved with ``os.replace`` and a symlink is left behind so
    older diffusers versions keep working.  Defaults fall back to the
    module-level cache locations.

    Fixes: the previous signature declared both parameters with the same
    obfuscated name (a SyntaxError).
    """
    if new_cache_dir is None:
        # NOTE(review): DIFFUSERS_CACHE / old_diffusers_cache are the intended
        # module constants; their definitions were renamed to ``_A``.
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir ).expanduser()
    new_cache_dir = Path(new_cache_dir ).expanduser()
    for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir )
            new_blob_path.parent.mkdir(parents=True , exist_ok=True )
            os.replace(old_blob_path , new_blob_path )
            try:
                os.symlink(new_blob_path , old_blob_path )
            except OSError:
                logger.warning(
                    '''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
# One-time cache-layout migration, executed at import time.
# NOTE(review): every binding below was renamed to ``_A`` by obfuscation
# (cache_version, old_cache_is_not_empty, trace) while later lines still read
# the original names, and ``move_cache`` is the function defined above as
# ``_a`` — restore the names before relying on this code path.
_A : List[str] = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    # No marker file yet: assume the pre-migration layout (version 0).
    _A : Any = 0
else:
    with open(cache_version_file) as f:
        try:
            _A : Tuple = int(f.read())
        except ValueError:
            # Unreadable marker: treat the cache as unmigrated.
            _A : Any = 0
if cache_version < 1:
    _A : Tuple = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
            'existing cached models. This is a one-time operation, you can interrupt it or run it '
            'later by calling `diffusers.utils.hub_utils.move_cache()`.'
        )
        try:
            move_cache()
        except Exception as e:
            # Surface the traceback so users can report migration failures.
            _A : Dict = '\n'.join(traceback.format_tb(e.__traceback__))
            logger.error(
                F'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
                'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
                'message and we will do our best to help.'
            )
if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, 'w') as f:
            f.write('1')
    except Exception:
        logger.warning(
            F'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
            'the directory exists and can be written to.'
        )
def _a ( weights_name , variant = None ) -> str:
    """Insert *variant* before the file extension: ``model.bin`` -> ``model.fp16.bin``.

    Fixes: the previous signature declared both parameters with the same
    obfuscated name (a SyntaxError).
    """
    if variant is not None:
        splits = weights_name.split('''.''' )
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '''.'''.join(splits )
    return weights_name
def _a ( pretrained_model_name_or_path , *,
    weights_name , subfolder , cache_dir , force_download , proxies , resume_download , local_files_only , use_auth_token , user_agent , revision , commit_hash=None , ):
    """Resolve a weights file for a local file/directory or a Hub repository.

    Returns the path to the weights file.  For Hub repos the file is fetched
    with ``hf_hub_download``; a deprecated revision (e.g. ``fp16``) combined
    with the standard weight names triggers the variant fallback with a
    deprecation warning.

    Fixes: the previous signature declared all twelve parameters with the
    same obfuscated name (a SyntaxError) and passed that name for every
    argument; names are restored below.
    """
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    if os.path.isfile(pretrained_model_name_or_path ):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path ):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path , weights_name ) ):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path , weights_name )
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path , subfolder , weights_name ) ):
            model_file = os.path.join(pretrained_model_name_or_path , subfolder , weights_name )
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__ ).base_version ) >= version.parse('''0.20.0''' )
        ):
            try:
                # NOTE(review): ``_add_variant`` is the helper above (its def
                # was obfuscated to ``_a``); restore the name at module level.
                model_file = hf_hub_download(
                    pretrained_model_name_or_path ,
                    filename=_add_variant(weights_name , revision ) ,
                    cache_dir=cache_dir ,
                    force_download=force_download ,
                    proxies=proxies ,
                    resume_download=resume_download ,
                    local_files_only=local_files_only ,
                    use_auth_token=use_auth_token ,
                    user_agent=user_agent ,
                    subfolder=subfolder ,
                    revision=revision or commit_hash , )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead." , FutureWarning , )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name , revision )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name , revision )}' so that the correct variant file can be added." , FutureWarning , )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path ,
                filename=weights_name ,
                cache_dir=cache_dir ,
                force_download=force_download ,
                proxies=proxies ,
                resume_download=resume_download ,
                local_files_only=local_files_only ,
                use_auth_token=use_auth_token ,
                user_agent=user_agent ,
                subfolder=subfolder ,
                revision=revision or commit_hash , )
            return model_file
        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                '''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
                '''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
                '''login`.''' )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                '''this model name. Check the model page at '''
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                ''' \nCheckout your internet connection or see how to run the library in'''
                ''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                '''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}" )
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def _a ( UpperCAmelCase ) -> bool:
    """Return True when *UpperCAmelCase* is a perfect square.

    Uses integer ``math.isqrt`` so arbitrarily large values are tested
    exactly; the previous float ``** 0.5`` loses precision past 2**52 and
    raised a TypeError for negative inputs (which now simply return False).
    """
    if UpperCAmelCase < 0:
        return False
    from math import isqrt  # local import: keeps the module import line untouched
    root = isqrt(UpperCAmelCase )
    return UpperCAmelCase == root * root
def _a ( x_num , x_den , y_num , y_den , z_num , z_den ) -> tuple[int, int]:
    """Return x/x_den + y/y_den + z/z_den as a reduced (numerator, denominator) pair.

    Fixes: the previous signature declared all six parameters with the same
    obfuscated name (a SyntaxError); the body already used these names.
    """
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top , bottom )
    top //= hcf
    bottom //= hcf
    return top, bottom
def _a ( UpperCAmelCase = 35 ) -> int:
    """Sum all distinct reduced fractions z (0 < z_num < z_den <= order)
    obtainable from pairs x = x_num/x_den, y = y_num/y_den for the exponent
    cases n in {1, 2, -1, -2}, and return numerator + denominator of the total.

    Fixes: the original fed the same obfuscated name to every ``gcd``/helper
    call, and the ``is_sq``/``add_three`` helpers were themselves renamed to
    ``_a`` (shadowed by later definitions) — they are inlined here as private
    closures so the function is self-contained.
    """

    def _is_sq(number ):
        # Perfect-square test (mirrors the module-level helper).
        root = int(sqrt(number ) )
        return number == root * root

    def _add_three(x_num , x_den , y_num , y_den , z_num , z_den ):
        # x/xd + y/yd + z/zd as a reduced (numerator, denominator) tuple.
        top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
        bottom = x_den * y_den * z_den
        hcf = gcd(top , bottom )
        return top // hcf, bottom // hcf

    unique_s: set = set()
    total: Fraction = Fraction(0 )
    for x_num in range(1 , UpperCAmelCase + 1 ):
        for x_den in range(x_num + 1 , UpperCAmelCase + 1 ):
            for y_num in range(1 , UpperCAmelCase + 1 ):
                for y_den in range(y_num + 1 , UpperCAmelCase + 1 ):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= UpperCAmelCase:
                        unique_s.add(_add_three(x_num , x_den , y_num , y_den , z_num , z_den ) )
                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if _is_sq(z_num ) and _is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= UpperCAmelCase:
                            unique_s.add(_add_three(x_num , x_den , y_num , y_den , z_num , z_den ) )
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= UpperCAmelCase:
                        unique_s.add(_add_three(x_num , x_den , y_num , y_den , z_num , z_den ) )
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if _is_sq(z_num ) and _is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= UpperCAmelCase:
                            unique_s.add(_add_three(x_num , x_den , y_num , y_den , z_num , z_den ) )
    for num, den in unique_s:
        total += Fraction(num , den )
    return total.denominator + total.numerator
if __name__ == "__main__":
    # ``solution`` was renamed ``_a`` by obfuscation; call the surviving name.
    print(F'''{_a() = }''')
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def a__ ( a__=None ):
    """Build the ``accelerate config`` argument parser.

    *a__* is an optional parent ``_SubParsersAction`` forwarded to
    ``config_command_parser``; returns the configured config parser with its
    ``default`` and ``update`` subcommands attached.

    Fixes: ``add_help``/``allow_abbrev`` previously received the subparsers
    argument instead of False, and the subcommand parsers were attached to
    the wrong object with an undefined ``parent_parser`` reference.
    """
    # Parent parser shared by the subcommands (no -h so it can be composed).
    parent_parser = argparse.ArgumentParser(add_help=False , allow_abbrev=False )
    # The main config parser
    config_parser = config_command_parser(a__ )
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="""subcommands""" , dest="""subcommand""" )
    # Then add other parsers with the parent parser
    default_command_parser(subcommands , parents=[parent_parser] )
    update_command_parser(subcommands , parents=[parent_parser] )
    return config_parser
def a__ ( ):
    """CLI entry point for ``accelerate config``: parse argv and dispatch to
    the selected subcommand's ``func``.

    NOTE(review): ``get_config_parser`` was renamed ``a__`` by obfuscation
    (and this ``main`` then shadows it), so the call below no longer
    resolves; the two results are also both bound to the same local while
    later lines read ``config_parser``/``args`` — restore the names.
    """
    __SCREAMING_SNAKE_CASE = get_config_parser()
    __SCREAMING_SNAKE_CASE = config_parser.parse_args()
    if not hasattr(a__ , """func""" ):
        config_parser.print_help()
        exit(1 )
    # Run
    args.func(a__ )
if __name__ == "__main__":
    # ``main`` was renamed ``a__`` by obfuscation (the last definition above).
    a__()
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class lowerCAmelCase__ ( IterableDataset ):
    """Minimal iterable dataset that yields the elements of *data* in order.

    Fixes: the base class was an undefined name ``a`` (should be
    ``IterableDataset``, already imported at module level), and the backing
    data was stored in a throwaway local instead of ``self.data``.
    """

    def __init__( self , data ) -> None:
        # Keep a reference so __iter__ can replay the sequence.
        self.data = data

    def __iter__( self ):
        for element in self.data:
            yield element
def a__ ( a__=True ):
    """Create a two-process Accelerator; *a__* toggles even_batches."""
    acc = Accelerator(even_batches=a__ )
    assert acc.num_processes == 2, "this script expects that two GPUs are available"
    return acc
def a__ ( accelerator , dataset_size , batch_size , iterable = False ):
    """Build a dataset of ``range(dataset_size)`` (iterable or map-style) and
    return it prepared as a DataLoader on *accelerator*.

    Fixes: the previous signature declared all four parameters with the same
    obfuscated name (a SyntaxError).
    """
    if iterable:
        # NOTE(review): ``DummyIterableDataset`` is the class above (its name
        # was obfuscated to ``lowerCAmelCase__``); restore at module level.
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size ) ) )
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size ) ) )
    dl = DataLoader(dataset , batch_size=batch_size )
    dl = accelerator.prepare(dl )
    return dl
def a__ ( accelerator , dataset_size , batch_size , process_0_expected_batch_sizes , process_1_expected_batch_sizes , ):
    """Assert each process observes the expected per-batch sizes for the given
    dataset/batch configuration.

    Fixes: the previous signature declared all five parameters with the same
    obfuscated name (a SyntaxError).
    """
    dl = create_dataloader(accelerator=accelerator , dataset_size=dataset_size , batch_size=batch_size )
    batch_sizes = [len(batch[0] ) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def a__ ( ):
    """Even batches (default): both processes must see identical batch shapes.

    Fixes: the accelerator was previously passed as the module-level name
    ``a__`` instead of the local it was bound to.
    """
    accelerator = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def a__ ( ):
    """With even_batches disabled, the later process may see fewer/smaller batches.

    Fixes: ``even_batches`` previously received the module-level name ``a__``
    instead of False, and the accelerator local was misreferenced.
    """
    accelerator = create_accelerator(even_batches=False )
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def a__ ( ):
    """join_uneven_inputs lets processes with different batch counts finish a
    backward pass together.

    Fixes: restored the locals that obfuscation collapsed (``enumerate`` was
    iterating the module name ``a__`` and the batch index appended was the
    module name as well), and ``even_batches`` is explicitly False.
    """
    accelerator = create_accelerator(even_batches=False )
    model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(model )
    dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )
    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model] ):
        for batch_idx, batch in enumerate(dl ):
            output = ddp_model(batch[0].float() )
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx )
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def a__ ( a__ ):
    """Joining uneven inputs outside DDP should emit a UserWarning, not fail.

    *a__* is the accelerator under test.  Fixes: ``record`` previously
    received the accelerator argument instead of True, and the warning
    category check compared against the accelerator instead of UserWarning.
    """
    with warnings.catch_warnings(record=True ) as w:
        with a__.join_uneven_inputs([Mock()] ):
            pass
    assert issubclass(w[-1].category , UserWarning )
    assert "only supported for multi-GPU" in str(w[-1].message )
def a__ ( ):
    """join_uneven_inputs can temporarily override even_batches on prepared
    dataloaders and must restore the default afterwards.

    Fixes: restored the distinct locals that obfuscation collapsed into one
    name and passed the real True/False flags instead of the module name.
    """
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches )
    model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(model )
    train_dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )
    valid_dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )
    with accelerator.join_uneven_inputs([ddp_model] , even_batches=overridden_even_batches ):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def a__ ( ):
    """Overriding even_batches must apply to batch-sampler dataloaders and
    silently skip iterable ones (no AttributeError).

    Fixes: restored the collapsed locals and the True/False flags that
    obfuscation replaced with the module name ``a__``.
    """
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches )
    model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(model )
    create_dataloader(accelerator , dataset_size=3 , batch_size=1 , iterable=True )
    batch_dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )
    with warnings.catch_warnings():
        warnings.filterwarnings("""ignore""" )
        try:
            with accelerator.join_uneven_inputs([ddp_model] , even_batches=overridden_even_batches ):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def a__ ( ):
    """Overriding even_batches for an iterable dataloader should warn that the
    override only applies to map-style datasets.

    Fixes: restored the collapsed locals; ``record``/``iterable``/
    ``even_batches`` previously received the module name ``a__`` instead of
    True/True/False.
    """
    accelerator = create_accelerator()
    model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(model )
    create_dataloader(accelerator , dataset_size=3 , batch_size=1 , iterable=True )
    with warnings.catch_warnings(record=True ) as w:
        with accelerator.join_uneven_inputs([ddp_model] , even_batches=False ):
            pass
    assert issubclass(w[-1].category , UserWarning )
    assert "only supported for map-style datasets" in str(w[-1].message )
def main():
    """Run the full even-batches test suite on the current accelerator."""
    accelerator = create_accelerator()
    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()
    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()
    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()
    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()
    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print("Test join with non DDP distributed raises warning")
    # Temporarily switch the distributed type so the non-DDP warning path runs,
    # then restore the real state afterwards.
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
    # Entry point when the test module is launched directly (e.g. via `accelerate launch`).
    main()
| 627 | 1 |
'''simple docstring'''
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    """
    Build the initial highway: one row of `number_of_cells` cells where -1 marks
    an empty cell and any other value is the speed of the car in that cell.

    >>> construct_highway(10, 2, 6)
    [[6, -1, 6, -1, 6, -1, 6, -1, 6, -1]]
    """
    highway = [[-1] * number_of_cells]  # create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)  # a speed can never be negative
    while i < number_of_cells:
        # place a car: either a random speed or the requested initial speed
        highway[0][i] = randint(0, max_speed) if random_speed else initial_speed
        # gap to the next car: random or the fixed frequency
        i += randint(1, max_speed * 2) if random_frequency else frequency
    return highway
def get_distance(highway_now: list, car_index: int) -> int:
    """Return the number of empty cells in front of the car at `car_index`,
    wrapping around to the start of the highway (the road is circular)."""
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in cells:
        if cell != -1:  # the cell is occupied: we have the distance we wanted
            return distance
        distance += 1
    # The car is near the end of the highway: keep counting from the start.
    return distance + get_distance(highway_now, -1)
def update(highway_now: list, probability: float, max_speed: int) -> list:
    """Compute the next speed of every car on `highway_now` following the
    Nagel-Schreckenberg rules: accelerate, brake for the car ahead, and
    randomly slow down with the given probability."""
    number_of_cells = len(highway_now)
    # Before calculations, the next highway is empty.
    next_highway = [-1] * number_of_cells

    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Rule 1: add 1 to the current speed of the car and cap at max_speed.
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car.
            dn = get_distance(highway_now, car_index) - 1
            # Rule 2: we can't have the car causing an accident.
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Rule 3: randomly, a driver will slow down.
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway
def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    """Run `number_of_update` simulation steps, appending each new highway
    state (speeds moved to their new positions) to `highway`, and return it."""
    number_of_cells = len(highway[0])

    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to wrap around).
                index = (car_index + speed) % number_of_cells
                # Commit the change of position.
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 718 |
"""Lazy import structure for the RAG model family."""

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Submodule name -> public names; consumed by _LazyModule below.
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]


if TYPE_CHECKING:
    # Direct imports for type checkers only; at runtime everything is lazy.
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 41 | 0 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    # NOTE: pickle.load runs arbitrary code; only load dumps you produced yourself.
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    # token id -> number of occurrences; ids never observed stay at 0
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    """
    Output of the scheduler's `step` function.

    Fields:
        prev_sample: denoised sample for the previous timestep; fed back into
            the next denoising step.
        pred_original_sample: the model's current estimate of the fully
            denoised sample (useful for previews / guidance).
    """

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def __snake_case (__UpperCAmelCase , __UpperCAmelCase=0.999 , __UpperCAmelCase="cosine" , ):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(__UpperCAmelCase ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__UpperCAmelCase ):
return math.exp(t * -12.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
lowerCamelCase_ : Union[str, Any] = []
for i in range(__UpperCAmelCase ):
lowerCamelCase_ : Tuple = i / num_diffusion_timesteps
lowerCamelCase_ : str = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__UpperCAmelCase ) / alpha_bar_fn(__UpperCAmelCase ) , __UpperCAmelCase ) )
return torch.tensor(__UpperCAmelCase , dtype=torch.floataa )
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    """
    Modified DDPM scheduler as used in the UnCLIP paper (karlo). Only supports
    the `squaredcos_cap_v2` (cosine) beta schedule.
    """

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1_000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)  # alpha_bar for "step -1" (no noise)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """Identity: UnCLIP requires no input scaling; kept for API parity."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """
        Set the discrete timesteps used for the diffusion chain (run before
        inference). Timesteps are spaced so the last one is always 0.
        """
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        """Return the variance for timestep `t` (formulas (6)/(7) of the DDPM paper)."""
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            # re-derive beta for non-adjacent steps
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from
        # https://arxiv.org/pdf/2006.11239.pdf) and sample from it to get the
        # previous sample: x_{t-1} ~ N(pred_prev_sample, variance).
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler: interpolate in log space
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        """
        Predict the sample at the previous timestep by reversing the SDE.

        Returns an `UnCLIPSchedulerOutput` (or a 1-tuple when
        `return_dict=False`) containing the previous sample.
        """
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            # model predicts both noise and variance: split the channels
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )
            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        """Diffuse `original_samples` to the noise levels given by `timesteps`."""
        # Make sure alphas_cumprod and timesteps have the same device/dtype as the samples.
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
| 501 | 0 |
from __future__ import annotations
import math
class SegmentTree:
    """Segment tree with lazy propagation supporting range-assign updates and
    range-max queries over a 1-indexed array of `size` elements."""

    def __init__(self, size):
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx):
        """Index of the left child of node `idx`."""
        return idx * 2

    def right(self, idx):
        """Index of the right child of node `idx`."""
        return idx * 2 + 1

    def build(self, idx, left_element, right_element, a):
        """Build node `idx` covering [left_element, right_element] from array `a` (1-indexed)."""
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx, left_element, right_element, a, b, val):
        """Assign `val` to every element in [a, b]; lazily pushes pending updates."""
        if self.flag[idx] is True:
            # apply the pending assignment to this node and defer it to children
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            # fully covered: assign here and mark children lazy
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx, left_element, right_element, a, b):
        """Return the maximum over [a, b]; lazily pushes pending updates."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self):
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    # Demo: build the tree over a fixed array, run a few queries and
    # range-assign updates, then print the flattened tree contents.
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
| 708 | from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    """A pile of cards in patience sorting, ordered by its top (last) element."""

    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]
def patience_sort(collection: list) -> list:
    """Sort `collection` in place (and return it) using patience sorting."""
    stacks: list[Stack] = []
    # sort into stacks: each card goes on the leftmost stack whose top is >= it
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge the (descending) stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
    # Read a comma-separated list of integers from stdin and print it sorted.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
| 390 | 0 |
"""Lazy import structure for AltCLIP."""

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Submodule name -> public names; consumed by _LazyModule below.
_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]


if TYPE_CHECKING:
    # Direct imports for type checkers only; at runtime everything is lazy.
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 3 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Checkpoint name -> config URL; kept for reference.
UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-sat-base-100h-libri-ft": (
        "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    """
    Configuration class for a UniSpeechSat model. Stores architecture
    hyper-parameters (encoder sizes, convolutional feature extractor layout,
    SpecAugment masking, quantization/pretraining, CTC and XVector settings).
    """

    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'
                f' `len(config.conv_kernel) = {len(self.conv_kernel)}`.'
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        """Overall downsampling factor of the convolutional feature extractor."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 3 | 1 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    """Load an OmegaConf YAML config from `config_path`; optionally pretty-print it."""
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def load_vqgan(device, conf_path=None, ckpt_path=None):
    """Instantiate a VQModel from a YAML config and load a checkpoint onto `device`."""
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        # Lightning checkpoints wrap the weights in a "state_dict" key.
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd  # free the raw state dict
    return model
def reconstruct_with_vqgan(x, model):
    """Encode `x` through the VQGAN and decode the latent back; returns the reconstruction."""
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec
def get_obj_from_str(string, reload=False):
    """Resolve a dotted path like "module.Name" to the attribute object.

    With `reload=True`, the module is re-imported before the lookup.
    """
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
def instantiate_from_config(config):
    """Build the object named by `config["target"]`, passing `config["params"]` as kwargs."""
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    """Instantiate a model from `config`, optionally load state dict `sd`,
    move it to GPU and/or switch to eval mode. Returns {"model": model}."""
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config, ckpt, gpu, eval_mode):
    """Load checkpoint `ckpt` (if given) and build the model described by `config`.

    Returns (model, global_step); global_step is None when no checkpoint is given.
    """
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
| 701 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')

# Module-level logger. NOTE(review): the garbled name suggests this was `logger` upstream — confirm.
__UpperCamelCase = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Validate file extensions right after dataclass construction.
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class lowerCAmelCase :
    """Data collator for multiple choice: flattens the per-example choices,
    pads them with the tokenizer, then restores a (batch, num_choices, seq)
    layout and re-attaches the labels.

    Fields were renamed from the single repeated placeholder so the keyword
    construction used by the training script
    (``DataCollatorForMultipleChoice(tokenizer=..., pad_to_multiple_of=...)``)
    and the ``self.tokenizer``/``self.padding`` reads in ``__call__`` resolve.
    The project-type annotations are quoted so the class can be defined even
    when those names are unavailable at class-creation time.
    """

    tokenizer: "PreTrainedTokenizerBase"
    padding: "Union[bool, str, PaddingStrategy]" = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        """Collate a list of feature dicts (each holding per-choice lists plus
        a 'label'/'labels' entry) into a padded tensor batch."""
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        # One dict per (example, choice) pair so the tokenizer can pad them all together.
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        # Un-flatten back to (batch, num_choices, seq_len).
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels; the original used the nonexistent `torch.intaa`
        # (cross-entropy targets must be int64).
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
def lowercase () -> int:
  """Fine-tune/evaluate a multiple-choice model on SWAG (run_swag entry point).

  Parses ModelArguments/DataTrainingArguments/TrainingArguments (from the CLI
  or a single JSON file), configures logging and checkpoint resumption, loads
  the dataset (user CSV/JSON files or the hub 'swag' config), tokenizes the
  four context/ending pairs per example, then trains and/or evaluates with
  Trainer and finally pushes to the hub or writes a model card.

  NOTE(review): throughout this function the assignment targets are the
  obfuscated placeholder `SCREAMING_SNAKE_CASE`, while later statements read
  the intended names (`parser`, `training_args`, `raw_datasets`, `tokenizer`,
  `train_dataset`, ...). As written those reads raise NameError; the
  placeholders need to be restored to the names actually used before this
  script can run.
  """
  # See all possible arguments in src/transformers/training_args.py
  # or by passing the --help flag to this script.
  # We now keep distinct sets of args, for a cleaner separation of concerns.
  SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
  if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
    # If we pass only one argument to the script and it's the path to a json file,
    # let's parse it to get our arguments.
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
  else:
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
  # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
  # information sent is the one passed as arguments along with your Python/PyTorch versions.
  send_example_telemetry('run_swag' , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
  # Setup logging
  logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
  if training_args.should_log:
    # The default of training_args.log_level is passive, so we set log level at info here to have that default.
    transformers.utils.logging.set_verbosity_info()
  SCREAMING_SNAKE_CASE = training_args.get_process_log_level()
  logger.setLevel(SCREAMING_SNAKE_CASE_ )
  datasets.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ )
  transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ )
  transformers.utils.logging.enable_default_handler()
  transformers.utils.logging.enable_explicit_format()
  # Log on each process the small summary:
  # NOTE(review): `training_args.fpaa` (here and in the collator choice below)
  # does not match TrainingArguments' public attribute; presumably `fp16` — confirm.
  logger.warning(
    F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
    + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
  logger.info(F'Training/evaluation parameters {training_args}' )
  # Detecting last checkpoint.
  SCREAMING_SNAKE_CASE = None
  if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
    SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir )
    if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
      raise ValueError(
        F'Output directory ({training_args.output_dir}) already exists and is not empty. '
        'Use --overwrite_output_dir to overcome.' )
    elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
      logger.info(
        F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
        'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
  # Set seed before initializing model.
  set_seed(training_args.seed )
  # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
  # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
  # (the dataset will be downloaded automatically from the datasets Hub).
  # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
  # 'text' is found. You can easily tweak this behavior (see below).
  # In distributed training, the load_dataset function guarantee that only one local process can concurrently
  # download the dataset.
  if data_args.train_file is not None or data_args.validation_file is not None:
    SCREAMING_SNAKE_CASE = {}
    if data_args.train_file is not None:
      SCREAMING_SNAKE_CASE = data_args.train_file
    if data_args.validation_file is not None:
      SCREAMING_SNAKE_CASE = data_args.validation_file
    SCREAMING_SNAKE_CASE = data_args.train_file.split('.' )[-1]
    SCREAMING_SNAKE_CASE = load_dataset(
      SCREAMING_SNAKE_CASE_ , data_files=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
  else:
    # Downloading and loading the swag dataset from the hub.
    SCREAMING_SNAKE_CASE = load_dataset(
      'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
  # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
  # https://huggingface.co/docs/datasets/loading_datasets.html.
  # Load pretrained model and tokenizer
  # Distributed training:
  # The .from_pretrained methods guarantee that only one local process can concurrently
  # download model & vocab.
  SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(
    model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
  SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
    model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
  SCREAMING_SNAKE_CASE = AutoModelForMultipleChoice.from_pretrained(
    model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
  # When using your own dataset or a different dataset from swag, you will probably need to change this.
  SCREAMING_SNAKE_CASE = [F'ending{i}' for i in range(4 )]
  SCREAMING_SNAKE_CASE = 'sent1'
  SCREAMING_SNAKE_CASE = 'sent2'
  if data_args.max_seq_length is None:
    SCREAMING_SNAKE_CASE = tokenizer.model_max_length
    if max_seq_length > 10_24:
      logger.warning(
        'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
        ' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
        ' override this default with `--block_size xxx`.' )
      SCREAMING_SNAKE_CASE = 10_24
  else:
    if data_args.max_seq_length > tokenizer.model_max_length:
      logger.warning(
        F'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
        F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
    SCREAMING_SNAKE_CASE = min(data_args.max_seq_length , tokenizer.model_max_length )
  # Preprocessing the datasets.
  def preprocess_function(SCREAMING_SNAKE_CASE_ : Dict ):
    # Repeat each context once per ending, pair it with "<header> <ending>",
    # tokenize the flat list, then regroup every 4 rows back into one example.
    SCREAMING_SNAKE_CASE = [[context] * 4 for context in examples[context_name]]
    SCREAMING_SNAKE_CASE = examples[question_header_name]
    SCREAMING_SNAKE_CASE = [
      [F'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(SCREAMING_SNAKE_CASE_ )
    ]
    # Flatten out
    SCREAMING_SNAKE_CASE = list(chain(*SCREAMING_SNAKE_CASE_ ) )
    SCREAMING_SNAKE_CASE = list(chain(*SCREAMING_SNAKE_CASE_ ) )
    # Tokenize
    SCREAMING_SNAKE_CASE = tokenizer(
      SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' if data_args.pad_to_max_length else False , )
    # Un-flatten
    return {k: [v[i : i + 4] for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 4 )] for k, v in tokenized_examples.items()}
  if training_args.do_train:
    if "train" not in raw_datasets:
      raise ValueError('--do_train requires a train dataset' )
    SCREAMING_SNAKE_CASE = raw_datasets['train']
    if data_args.max_train_samples is not None:
      SCREAMING_SNAKE_CASE = min(len(SCREAMING_SNAKE_CASE_ ) , data_args.max_train_samples )
      SCREAMING_SNAKE_CASE = train_dataset.select(range(SCREAMING_SNAKE_CASE_ ) )
    with training_args.main_process_first(desc='train dataset map pre-processing' ):
      SCREAMING_SNAKE_CASE = train_dataset.map(
        SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
  if training_args.do_eval:
    if "validation" not in raw_datasets:
      raise ValueError('--do_eval requires a validation dataset' )
    SCREAMING_SNAKE_CASE = raw_datasets['validation']
    if data_args.max_eval_samples is not None:
      SCREAMING_SNAKE_CASE = min(len(SCREAMING_SNAKE_CASE_ ) , data_args.max_eval_samples )
      SCREAMING_SNAKE_CASE = eval_dataset.select(range(SCREAMING_SNAKE_CASE_ ) )
    with training_args.main_process_first(desc='validation dataset map pre-processing' ):
      SCREAMING_SNAKE_CASE = eval_dataset.map(
        SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
  # Data collator
  SCREAMING_SNAKE_CASE = (
    default_data_collator
    if data_args.pad_to_max_length
    else DataCollatorForMultipleChoice(tokenizer=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=8 if training_args.fpaa else None )
  )
  # Metric
  def compute_metrics(SCREAMING_SNAKE_CASE_ : Optional[int] ):
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = eval_predictions
    SCREAMING_SNAKE_CASE = np.argmax(SCREAMING_SNAKE_CASE_ , axis=1 )
    # NOTE(review): numpy defines no `floataa`; presumably np.float32 — confirm.
    return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
  # Initialize our Trainer
  SCREAMING_SNAKE_CASE = Trainer(
    model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=SCREAMING_SNAKE_CASE_ , data_collator=SCREAMING_SNAKE_CASE_ , compute_metrics=SCREAMING_SNAKE_CASE_ , )
  # Training
  if training_args.do_train:
    SCREAMING_SNAKE_CASE = None
    if training_args.resume_from_checkpoint is not None:
      SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint
    elif last_checkpoint is not None:
      SCREAMING_SNAKE_CASE = last_checkpoint
    SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE_ )
    trainer.save_model() # Saves the tokenizer too for easy upload
    SCREAMING_SNAKE_CASE = train_result.metrics
    SCREAMING_SNAKE_CASE = (
      data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE_ )
    )
    SCREAMING_SNAKE_CASE = min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
    trainer.log_metrics('train' , SCREAMING_SNAKE_CASE_ )
    trainer.save_metrics('train' , SCREAMING_SNAKE_CASE_ )
    trainer.save_state()
  # Evaluation
  if training_args.do_eval:
    logger.info('*** Evaluate ***' )
    SCREAMING_SNAKE_CASE = trainer.evaluate()
    SCREAMING_SNAKE_CASE = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(SCREAMING_SNAKE_CASE_ )
    SCREAMING_SNAKE_CASE = min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
    trainer.log_metrics('eval' , SCREAMING_SNAKE_CASE_ )
    trainer.save_metrics('eval' , SCREAMING_SNAKE_CASE_ )
  SCREAMING_SNAKE_CASE = {
    'finetuned_from': model_args.model_name_or_path,
    'tasks': 'multiple-choice',
    'dataset_tags': 'swag',
    'dataset_args': 'regular',
    'dataset': 'SWAG',
    'language': 'en',
  }
  if training_args.push_to_hub:
    trainer.push_to_hub(**SCREAMING_SNAKE_CASE_ )
  else:
    trainer.create_model_card(**SCREAMING_SNAKE_CASE_ )
def lowercase (SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]:
  # For xla_spawn (TPUs)
  # NOTE(review): `main` is not defined anywhere in this module (the entry
  # point above is named `lowercase`, which this def also shadows); calling
  # this function raises NameError as written.
  main()
# NOTE(review): same issue — `main` is undefined at module scope.
if __name__ == "__main__":
  main()
| 327 | 0 |
from datetime import datetime as dt
import os
from github import Github
# Issues carrying any of these labels are exempt from the stale bot.
_lowercase = [
    'good first issue',
    'good second issue',
    'good difficult issue',
    'feature request',
    'new model',
    'wip',
]
# The bot body checks membership against LABELS_TO_EXEMPT, but the list above
# was bound only to `_lowercase`; alias it so that lookup resolves.
LABELS_TO_EXEMPT = _lowercase
def __lowerCAmelCase ( ) -> None:
    """Close or mark stale inactive GitHub issues on huggingface/transformers.

    Issues last pinged by the bot and inactive for more than 7 days are closed;
    issues inactive for more than 23 days receive a stale-warning comment. In
    both cases the issue must be at least 30 days old and carry no label listed
    in LABELS_TO_EXEMPT.

    The original body assigned every result to an obfuscated placeholder while
    reading `g`/`repo`/`open_issues`/`comments`, sorted with a lambda that
    referenced the undefined name `i`, and passed the undefined
    `_UpperCamelCase` as `reverse=`; those names are restored here.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first.
        comments = sorted(
            [comment for comment in issue.get_comments()], key=lambda comment: comment.created_at, reverse=True
        )
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
if __name__ == "__main__":
    # The module defines no `main`; invoke the stale-bot entry point above.
    __lowerCAmelCase()
| 306 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_lowercase = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
# `sys.path.insert` below referenced an undefined `git_repo_path`; bind the
# computed path under that name as well so the insert resolves.
git_repo_path = _lowercase
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def __lowerCAmelCase ( _UpperCamelCase ) -> None:
    """pytest_addoption hook body: register the shared transformers CLI options.

    `_UpperCamelCase` is the pytest option parser. The import is kept local so
    importing this file outside a test run does not require transformers. The
    original `-> str` annotation was wrong — the function returns None.

    NOTE(review): a second def with this same obfuscated name appears later in
    the file and shadows this one, so the hook is never seen by pytest — likely
    an artifact of the renaming.
    """
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(_UpperCamelCase)
def __lowerCAmelCase ( _UpperCamelCase ) -> None:
    """pytest_terminal_summary hook body: emit consolidated reports on demand.

    `_UpperCamelCase` is pytest's terminal reporter. The original body read the
    undefined names `terminalreporter` and `make_reports` and passed the
    reporter itself as the report id; it also used `-> Any` although `Any` is
    never imported in this file. Use the parameter and the option value, and
    annotate the actual None return.
    """
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = _UpperCamelCase.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(_UpperCamelCase, id=make_reports)
| 306 | 1 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def _snake_case ( lowercase__ : List[Any] , lowercase__ : Tuple=1 ) -> Tuple:
'''simple docstring'''
if n_shave_prefix_segments >= 0:
return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
else:
return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )
def _snake_case ( lowercase__ : int , lowercase__ : Tuple=0 ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ :Union[str, Any] = []
for old_item in old_list:
lowerCAmelCase_ :Union[str, Any] = old_item.replace("""in_layers.0""" , """norm1""" )
lowerCAmelCase_ :str = new_item.replace("""in_layers.2""" , """conv1""" )
lowerCAmelCase_ :str = new_item.replace("""out_layers.0""" , """norm2""" )
lowerCAmelCase_ :Any = new_item.replace("""out_layers.3""" , """conv2""" )
lowerCAmelCase_ :int = new_item.replace("""emb_layers.1""" , """time_emb_proj""" )
lowerCAmelCase_ :Union[str, Any] = new_item.replace("""skip_connection""" , """conv_shortcut""" )
lowerCAmelCase_ :str = shave_segments(lowercase__ , n_shave_prefix_segments=lowercase__ )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def _snake_case ( lowercase__ : Tuple , lowercase__ : Optional[Any]=0 ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ :Tuple = []
for old_item in old_list:
lowerCAmelCase_ :Tuple = old_item
lowerCAmelCase_ :Dict = new_item.replace("""norm.weight""" , """group_norm.weight""" )
lowerCAmelCase_ :Any = new_item.replace("""norm.bias""" , """group_norm.bias""" )
lowerCAmelCase_ :Union[str, Any] = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" )
lowerCAmelCase_ :int = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" )
lowerCAmelCase_ :Tuple = shave_segments(lowercase__ , n_shave_prefix_segments=lowercase__ )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def _snake_case ( lowercase__ : int , lowercase__ : Optional[Any] , lowercase__ : List[str] , lowercase__ : Optional[Any]=None , lowercase__ : Optional[Any]=None , lowercase__ : Optional[int]=None ) -> Optional[int]:
'''simple docstring'''
assert isinstance(lowercase__ , lowercase__ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
lowerCAmelCase_ :Any = old_checkpoint[path]
lowerCAmelCase_ :Optional[int] = old_tensor.shape[0] // 3
lowerCAmelCase_ :Any = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
lowerCAmelCase_ :List[Any] = old_tensor.shape[0] // config["""num_head_channels"""] // 3
lowerCAmelCase_ :Union[str, Any] = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Any = old_tensor.split(channels // num_heads , dim=1 )
lowerCAmelCase_ :str = query.reshape(lowercase__ )
lowerCAmelCase_ :Optional[int] = key.reshape(lowercase__ )
lowerCAmelCase_ :Tuple = value.reshape(lowercase__ )
for path in paths:
lowerCAmelCase_ :Union[str, Any] = path["""new"""]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
lowerCAmelCase_ :str = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" )
lowerCAmelCase_ :Dict = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" )
lowerCAmelCase_ :int = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" )
if additional_replacements is not None:
for replacement in additional_replacements:
lowerCAmelCase_ :int = new_path.replace(replacement["""old"""] , replacement["""new"""] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
lowerCAmelCase_ :Dict = old_checkpoint[path["""old"""]][:, :, 0]
else:
lowerCAmelCase_ :Any = old_checkpoint[path["""old"""]]
def _snake_case ( lowercase__ : int , lowercase__ : List[Any] ) -> Optional[Any]:
  """Translate an LDM UNet state dict into the diffusers UNet2DModel layout.

  Walks the time-embedding, conv-in/out, input/middle/output blocks of the old
  checkpoint, renames resnet/attention/downsampler/upsampler keys and returns
  the converted dict.

  NOTE(review): the signature declares the same placeholder name for both
  parameters (a SyntaxError as written) while the body reads `checkpoint` and
  `config`; likewise most assignment targets are the placeholder
  `lowerCAmelCase_` although later lines read names such as `new_checkpoint`,
  `input_blocks`, `resnets`, `paths`, `meta_path` — these need to be restored
  before the function can run. It also calls `renew_resnet_paths`,
  `renew_attention_paths`, `assign_to_checkpoint` and `shave_segments`, none
  of which are defined under those names in this module.
  """
  lowerCAmelCase_ :Optional[int] = {}
  lowerCAmelCase_ :Tuple = checkpoint["""time_embed.0.weight"""]
  lowerCAmelCase_ :Dict = checkpoint["""time_embed.0.bias"""]
  lowerCAmelCase_ :Optional[Any] = checkpoint["""time_embed.2.weight"""]
  lowerCAmelCase_ :Tuple = checkpoint["""time_embed.2.bias"""]
  lowerCAmelCase_ :Dict = checkpoint["""input_blocks.0.0.weight"""]
  lowerCAmelCase_ :Optional[Any] = checkpoint["""input_blocks.0.0.bias"""]
  lowerCAmelCase_ :Union[str, Any] = checkpoint["""out.0.weight"""]
  lowerCAmelCase_ :Optional[Any] = checkpoint["""out.0.bias"""]
  lowerCAmelCase_ :Optional[int] = checkpoint["""out.2.weight"""]
  lowerCAmelCase_ :int = checkpoint["""out.2.bias"""]
  # Retrieves the keys for the input blocks only
  lowerCAmelCase_ :Any = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
  lowerCAmelCase_ :Dict = {
    layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
    for layer_id in range(lowercase__ )
  }
  # Retrieves the keys for the middle blocks only
  lowerCAmelCase_ :List[str] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
  lowerCAmelCase_ :Union[str, Any] = {
    layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
    for layer_id in range(lowercase__ )
  }
  # Retrieves the keys for the output blocks only
  lowerCAmelCase_ :List[Any] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
  lowerCAmelCase_ :List[str] = {
    layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
    for layer_id in range(lowercase__ )
  }
  # Down path: each input block is a resnet (x.0) plus an optional attention (x.1);
  # a bare `op` conv is a downsampler and is copied straight through.
  for i in range(1 , lowercase__ ):
    lowerCAmelCase_ :Any = (i - 1) // (config["""num_res_blocks"""] + 1)
    lowerCAmelCase_ :int = (i - 1) % (config["""num_res_blocks"""] + 1)
    lowerCAmelCase_ :Optional[Any] = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
    lowerCAmelCase_ :Any = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
    if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
      lowerCAmelCase_ :Tuple = checkpoint[
        f"""input_blocks.{i}.0.op.weight"""
      ]
      lowerCAmelCase_ :Any = checkpoint[
        f"""input_blocks.{i}.0.op.bias"""
      ]
      continue
    lowerCAmelCase_ :Dict = renew_resnet_paths(lowercase__ )
    lowerCAmelCase_ :Optional[int] = {"""old""": f"""input_blocks.{i}.0""", """new""": f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
    lowerCAmelCase_ :Optional[Any] = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
    assign_to_checkpoint(
      lowercase__ , lowercase__ , lowercase__ , additional_replacements=[meta_path, resnet_op] , config=lowercase__ )
    if len(lowercase__ ):
      lowerCAmelCase_ :Dict = renew_attention_paths(lowercase__ )
      lowerCAmelCase_ :Any = {
        """old""": f"""input_blocks.{i}.1""",
        """new""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
      }
      lowerCAmelCase_ :str = {
        f"""input_blocks.{i}.1.qkv.bias""": {
          """key""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
          """query""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
          """value""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
        },
        f"""input_blocks.{i}.1.qkv.weight""": {
          """key""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
          """query""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
          """value""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
        },
      }
      assign_to_checkpoint(
        lowercase__ , lowercase__ , lowercase__ , additional_replacements=[meta_path] , attention_paths_to_split=lowercase__ , config=lowercase__ , )
  # Middle block: resnet, attention, resnet.
  lowerCAmelCase_ :Union[str, Any] = middle_blocks[0]
  lowerCAmelCase_ :Union[str, Any] = middle_blocks[1]
  lowerCAmelCase_ :List[Any] = middle_blocks[2]
  lowerCAmelCase_ :Optional[Any] = renew_resnet_paths(lowercase__ )
  assign_to_checkpoint(lowercase__ , lowercase__ , lowercase__ , config=lowercase__ )
  lowerCAmelCase_ :Optional[Any] = renew_resnet_paths(lowercase__ )
  assign_to_checkpoint(lowercase__ , lowercase__ , lowercase__ , config=lowercase__ )
  lowerCAmelCase_ :int = renew_attention_paths(lowercase__ )
  lowerCAmelCase_ :List[Any] = {
    """middle_block.1.qkv.bias""": {
      """key""": """mid_block.attentions.0.key.bias""",
      """query""": """mid_block.attentions.0.query.bias""",
      """value""": """mid_block.attentions.0.value.bias""",
    },
    """middle_block.1.qkv.weight""": {
      """key""": """mid_block.attentions.0.key.weight""",
      """query""": """mid_block.attentions.0.query.weight""",
      """value""": """mid_block.attentions.0.value.weight""",
    },
  }
  assign_to_checkpoint(
    lowercase__ , lowercase__ , lowercase__ , attention_paths_to_split=lowercase__ , config=lowercase__ )
  # Up path: group each output block's keys by layer, then rename resnets,
  # upsampler convs and attentions.
  for i in range(lowercase__ ):
    lowerCAmelCase_ :List[Any] = i // (config["""num_res_blocks"""] + 1)
    lowerCAmelCase_ :Optional[Any] = i % (config["""num_res_blocks"""] + 1)
    lowerCAmelCase_ :Any = [shave_segments(lowercase__ , 2 ) for name in output_blocks[i]]
    lowerCAmelCase_ :List[Any] = {}
    for layer in output_block_layers:
      lowerCAmelCase_ , lowerCAmelCase_ :Dict = layer.split(""".""" )[0], shave_segments(lowercase__ , 1 )
      if layer_id in output_block_list:
        output_block_list[layer_id].append(lowercase__ )
      else:
        lowerCAmelCase_ :List[Any] = [layer_name]
    if len(lowercase__ ) > 1:
      lowerCAmelCase_ :Tuple = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
      lowerCAmelCase_ :str = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
      lowerCAmelCase_ :str = renew_resnet_paths(lowercase__ )
      lowerCAmelCase_ :Dict = renew_resnet_paths(lowercase__ )
      lowerCAmelCase_ :List[Any] = {"""old""": f"""output_blocks.{i}.0""", """new""": f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
      assign_to_checkpoint(lowercase__ , lowercase__ , lowercase__ , additional_replacements=[meta_path] , config=lowercase__ )
      if ["conv.weight", "conv.bias"] in output_block_list.values():
        lowerCAmelCase_ :Dict = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
        lowerCAmelCase_ :str = checkpoint[
          f"""output_blocks.{i}.{index}.conv.weight"""
        ]
        lowerCAmelCase_ :str = checkpoint[
          f"""output_blocks.{i}.{index}.conv.bias"""
        ]
        # Clear attentions as they have been attributed above.
        if len(lowercase__ ) == 2:
          lowerCAmelCase_ :Any = []
      if len(lowercase__ ):
        lowerCAmelCase_ :List[str] = renew_attention_paths(lowercase__ )
        lowerCAmelCase_ :str = {
          """old""": f"""output_blocks.{i}.1""",
          """new""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
        }
        lowerCAmelCase_ :int = {
          f"""output_blocks.{i}.1.qkv.bias""": {
            """key""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
            """query""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
            """value""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
          },
          f"""output_blocks.{i}.1.qkv.weight""": {
            """key""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
            """query""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
            """value""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
          },
        }
        assign_to_checkpoint(
          lowercase__ , lowercase__ , lowercase__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=lowercase__ , )
    else:
      lowerCAmelCase_ :str = renew_resnet_paths(lowercase__ , n_shave_prefix_segments=1 )
      for path in resnet_0_paths:
        lowerCAmelCase_ :Optional[int] = """.""".join(["""output_blocks""", str(lowercase__ ), path["""old"""]] )
        lowerCAmelCase_ :str = """.""".join(["""up_blocks""", str(lowercase__ ), """resnets""", str(lowercase__ ), path["""new"""]] )
        lowerCAmelCase_ :Optional[Any] = checkpoint[old_path]
  return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help='The config json file corresponding to the architecture.',
    )
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()

    # Every local was bound to the placeholder `__UpperCAmelCase` while later
    # statements read `args`/`checkpoint`/`config`/`model`/...; the names read
    # are restored here.
    checkpoint = torch.load(args.checkpoint_path)
    with open(args.config_file) as f:
        config = json.loads(f.read())

    # `_snake_case` is re-bound several times above; at this point it refers to
    # the last definition — the full LDM->diffusers checkpoint converter. The
    # original called the undefined name `convert_ldm_checkpoint`.
    converted_checkpoint = _snake_case(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNetaDModel(**config)
    model.load_state_dict(converted_checkpoint)

    # Best effort: save a full LDM pipeline when the scheduler/VQ-VAE live next
    # to the checkpoint, otherwise fall back to saving just the UNet weights.
    try:
        scheduler = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
        vqvae = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
| 256 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__UpperCAmelCase = False
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
  """Nightly GPU integration checks for the VersatileDiffusion pipeline:
  save/reload round-trip of dual-guided generation, plus slice comparisons for
  dual-guided, text-to-image and image-variation outputs.

  NOTE(review): all three methods share the name `__lowerCAmelCase`, so later
  definitions overwrite earlier ones and none is discovered by unittest (test
  methods must start with `test_`). The bodies also assign to the placeholder
  `lowerCAmelCase_` and then read names such as `pipe`, `generator`, `image`
  and `__A`, which are undefined as written; `torch.floataa` is presumably
  `torch.float16` — confirm against the upstream diffusers test.
  """
  def __lowerCAmelCase ( self ) -> Optional[Any]:
    # clean up the VRAM after each test
    super().tearDown()
    gc.collect()
    torch.cuda.empty_cache()
  def __lowerCAmelCase ( self ) -> str:
    # Round-trip: generate, save_pretrained, reload, regenerate with the same
    # seed and require numerically identical images.
    lowerCAmelCase_ :Union[str, Any] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
    pipe.to(__A )
    pipe.set_progress_bar_config(disable=__A )
    lowerCAmelCase_ :int = load_image(
      """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
    lowerCAmelCase_ :Tuple = torch.manual_seed(0 )
    lowerCAmelCase_ :Optional[int] = pipe.dual_guided(
      prompt="""first prompt""" , image=__A , text_to_image_strength=0.7_5 , generator=__A , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
    with tempfile.TemporaryDirectory() as tmpdirname:
      pipe.save_pretrained(__A )
      lowerCAmelCase_ :Any = VersatileDiffusionPipeline.from_pretrained(__A , torch_dtype=torch.floataa )
      pipe.to(__A )
      pipe.set_progress_bar_config(disable=__A )
    lowerCAmelCase_ :str = generator.manual_seed(0 )
    lowerCAmelCase_ :Union[str, Any] = pipe.dual_guided(
      prompt="""first prompt""" , image=__A , text_to_image_strength=0.7_5 , generator=__A , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
    assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
  def __lowerCAmelCase ( self ) -> Union[str, Any]:
    # Compare fixed pixel slices of each generation mode against recorded
    # reference values (loose 1e-1 tolerance).
    lowerCAmelCase_ :Dict = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
    pipe.to(__A )
    pipe.set_progress_bar_config(disable=__A )
    lowerCAmelCase_ :str = """cyberpunk 2077"""
    lowerCAmelCase_ :Tuple = load_image(
      """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
    lowerCAmelCase_ :List[str] = torch.manual_seed(0 )
    lowerCAmelCase_ :Tuple = pipe.dual_guided(
      prompt=__A , image=__A , text_to_image_strength=0.7_5 , generator=__A , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
    lowerCAmelCase_ :int = image[0, 253:256, 253:256, -1]
    assert image.shape == (1, 512, 512, 3)
    lowerCAmelCase_ :List[str] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
    assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    lowerCAmelCase_ :List[str] = """A painting of a squirrel eating a burger """
    lowerCAmelCase_ :Union[str, Any] = torch.manual_seed(0 )
    lowerCAmelCase_ :Dict = pipe.text_to_image(
      prompt=__A , generator=__A , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
    lowerCAmelCase_ :List[str] = image[0, 253:256, 253:256, -1]
    assert image.shape == (1, 512, 512, 3)
    lowerCAmelCase_ :str = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
    assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    lowerCAmelCase_ :Any = pipe.image_variation(__A , generator=__A , output_type="""numpy""" ).images
    lowerCAmelCase_ :Dict = image[0, 253:256, 253:256, -1]
    assert image.shape == (1, 512, 512, 3)
    lowerCAmelCase_ :Union[str, Any] = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
    assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 256 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
    """Tests for OwlViTProcessor: save/load round-trips and consistency of
    the processor with its underlying tokenizer and image processor.

    Bug fix: every method was named ``__lowerCAmelCase`` (so later defs
    shadowed earlier ones and helpers like ``get_tokenizer`` never existed),
    and fixture paths were assigned to throw-away locals instead of the
    ``self.*`` attributes the tests read. Canonical names restored.
    """

    def setUp(self):
        # Write a tiny CLIP vocab/merges pair and an image-processor config
        # into a temp dir that the get_* helpers load from.
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        """Slow tokenizer built from the temp-dir fixtures."""
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """Fast (Rust) tokenizer built from the temp-dir fixtures."""
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        """Image processor built from the temp-dir fixtures."""
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (HWC uint8)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        # Passing images through the processor must match the raw image processor.
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        # Passing text through the processor must match the raw tokenizer.
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        # Nested queries are padded to the longest inner list.
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 688 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __a ( gpta_checkpoint_path , gpta_config_file , pytorch_dump_folder_path ):
    """Convert a TensorFlow GPT-2 checkpoint to a PyTorch model dump.

    Args:
        gpta_checkpoint_path: path to the TensorFlow checkpoint.
        gpta_config_file: path to a GPT-2 config JSON file; an empty string
            selects the library's default configuration.
        pytorch_dump_folder_path: directory where the PyTorch weights and
            config files are written.

    Bug fix: the three parameters all shared one name (a syntax error) and
    the weights were saved to a parameter instead of the computed dump path.
    """
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file)
    model = GPTaModel(config)

    # Load weights from the TF checkpoint (numpy arrays) into the PyTorch model.
    load_tf_weights_in_gpta(model, config, gpta_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(F'Save PyTorch model to {pytorch_weights_dump_path}')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(F'Save configuration file to {pytorch_config_dump_path}')
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


# Descriptive alias: the CLI entry point below calls this name.
convert_gpta_checkpoint_to_pytorch = __a
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--gpt2_config_file',
        default='',
        type=str,
        help=(
            'An optional config json file corresponding to the pre-trained OpenAI model. \n'
            'This specifies the model architecture.'
        ),
    )
    args = parser.parse_args()
    # Bug fix: argparse stores "--gpt2_checkpoint_path" as args.gpt2_checkpoint_path;
    # the previous code read non-existent "gpta_*" attributes and called an
    # undefined function name.
    __a(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 688 | 1 |
"""simple docstring"""
def combination_util(arr, n, r, index, data, i):
    """Recursively print every combination of size ``r`` drawn from ``arr``.

    Args:
        arr: source values.
        n: number of usable elements in ``arr``.
        r: combination size.
        index: next free slot in ``data``.
        data: scratch buffer holding the combination under construction.
        i: index of the next candidate element in ``arr``.

    Bug fix: both functions in this module were renamed to ``A__`` while the
    recursive calls and the driver still used ``combination_util`` /
    ``print_combination``; the original names are restored.
    """
    if index == r:
        # A full combination has been built: print it.
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


def print_combination(arr, n, r):
    """Print all combinations of size ``r`` in ``arr[]`` of size ``n``.

    Mainly a driver around :func:`combination_util`.
    """
    # Temporary array 'data[]' reused across the recursion.
    data = [0] * r
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
| 168 |
"""simple docstring"""
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def A__ ( UpperCamelCase__ ):
    """Return the argument plus 2 — a toy tool for the interpreter tests.

    Bug fix: the body returned ``x + 2`` but the parameter is named
    ``UpperCamelCase__``, so every call raised ``NameError``.
    """
    return UpperCamelCase__ + 2


# The tests in this module refer to this helper as ``add_two``; expose that
# name as a backward-compatible alias.
add_two = A__
class __snake_case( unittest.TestCase ):
    """Tests for the agents' restricted Python interpreter ``evaluate``.

    Bug fix: every test method was named ``A`` (so only the last survived
    class creation) and the ``code``/``state``/``result`` locals were all
    collapsed into ``_SCREAMING_SNAKE_CASE`` while ``evaluate`` was called
    with the undefined name ``A_``. Canonical names are restored; the
    assertions are unchanged.
    """

    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
| 168 | 1 |
def a__ ( snake_case ):
    """Return the sum of all proper divisors of a positive integer.

    Args:
        snake_case: the number whose proper divisors are summed.

    Returns:
        The sum of every divisor of ``snake_case`` strictly smaller than it
        (0 for input 1, which has no proper divisors).

    Raises:
        ValueError: if the input is not an integer or is not positive.

    Bug fix: the type check was ``isinstance(snake_case, snake_case)`` (a
    TypeError for any input) and the sign check read the undefined name
    ``input_num``.
    """
    if not isinstance(snake_case, int):
        raise ValueError('''Input must be an integer''')
    if snake_case <= 0:
        raise ValueError('''Input must be positive''')
    # Proper divisors can be at most half of the number itself.
    return sum(
        divisor for divisor in range(1, snake_case // 2 + 1) if snake_case % divisor == 0)
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 74 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

# Bug fix: all five module constants were assigned to the same name
# `__lowerCAmelCase` (each assignment overwrote the previous one) while the
# tokenizer class below reads VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and PRETRAINED_INIT_CONFIGURATION.
# The canonical names are restored.

# Names of the on-disk tokenizer asset files.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Download URLs for each published DistilBERT checkpoint's vocab/tokenizer files.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-german-cased": (
            "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
        ),
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input length (in tokens) accepted by each checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "distilbert-base-uncased": 512,
    "distilbert-base-uncased-distilled-squad": 512,
    "distilbert-base-cased": 512,
    "distilbert-base-cased-distilled-squad": 512,
    "distilbert-base-german-cased": 512,
    "distilbert-base-multilingual-cased": 512,
}

# Per-checkpoint tokenizer constructor defaults.
PRETRAINED_INIT_CONFIGURATION = {
    "distilbert-base-uncased": {"do_lower_case": True},
    "distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
    "distilbert-base-cased": {"do_lower_case": False},
    "distilbert-base-cased-distilled-squad": {"do_lower_case": False},
    "distilbert-base-german-cased": {"do_lower_case": False},
    "distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class __magic_name__(PreTrainedTokenizerFast):
    r"""A "fast" DistilBERT tokenizer backed by HuggingFace *tokenizers*.

    Bug fix: the base class was the undefined name ``_a`` (should be
    ``PreTrainedTokenizerFast``), every class attribute was named
    ``_UpperCAmelCase`` (so only the last assignment survived) and every
    method shared one name with duplicate parameter names (a syntax error).
    The canonical tokenizer API names are restored.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-sync the backend normalizer if saved state disagrees with the
        # constructor arguments (e.g. a checkpoint serialized with different
        # lowercasing options).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        """Build `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]` from id lists."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]

        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: all zeros for a single sequence, zeros for
        the first segment and ones for the second in a pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Persist the backend tokenizer model files into *save_directory*."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 333 | 0 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
# Issue labels that protect an issue from being marked stale or closed.
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]
# Backward-compatible alias for the previous (auto-generated) constant name.
lowercase = LABELS_TO_EXEMPT


def main():
    """Run one stale-bot pass over open huggingface/diffusers issues.

    Closes issues that stayed inactive 7 days after the stale notification,
    un-stales issues a human commented on, and posts the stale notice on
    issues inactive for more than 23 days (and at least 30 days old).

    Bug fix: the exempt-label list was bound to ``lowercase`` while this body
    read ``LABELS_TO_EXEMPT``; the sort lambda's parameter was renamed away
    from the ``i`` its body used; ``reverse=True`` had been replaced by an
    undefined name; and the ``__main__`` guard called an undefined ``main``.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first.
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored.")
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
| 704 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE_(ProcessorMixin):
    r"""Processor combining a LayoutLMv3 image processor and tokenizer into a
    single callable that prepares model inputs (ids, boxes, pixel values).

    Bug fix: the base class was the undefined name ``_lowercase`` (should be
    ``ProcessorMixin``), the three class attributes were all assigned to
    ``__magic_name__`` and ``__call__``'s nineteen parameters shared one name
    (a syntax error). The canonical names are restored.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Accept the deprecated kwarg as a fallback for the new one.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text=None,
        text_pair=None,
        boxes=None,
        word_labels=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ) -> BatchEncoding:
        """Run the image processor then the tokenizer and merge their outputs.

        When the image processor performs OCR, the recognized words/boxes are
        fed to the tokenizer; otherwise the caller must supply ``boxes`` (and
        optionally ``word_labels``).
        """
        # verify input: OCR output and user-provided boxes/labels are exclusive
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.")

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        pixel_values = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            # Duplicate images so each overflowed sequence keeps its source image.
            pixel_values = self.get_overflowing_images(pixel_values, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = pixel_values

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        """Repeat each image once per overflowed sequence produced from it."""
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f""" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}""")

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's :meth:`batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's :meth:`decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 150 | 0 |
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    """Split a scikit-learn "bunch"-style mapping into (features, targets).

    Bug fix: all three functions in this module were renamed to
    ``snake_case`` (so each shadowed the previous and ``main`` would have
    recursed into itself), while the bodies and the ``__main__`` guard still
    called ``data_handling``/``xgboost``/``main``; the original names are
    restored, and the body now reads its own parameter instead of an
    undefined name.
    """
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> "XGBClassifier":
    """Fit and return an XGBoost classifier on the given training data."""
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    """Train an XGBoost classifier on the IRIS dataset and display its
    normalized confusion matrix."""
    # Load the IRIS dataset and split it into train/test sets.
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25)
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap='Blues', normalize='true', )
    plt.title('Normalized Confusion Matrix - IRIS Dataset')
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
def SCREAMING_SNAKE_CASE_ ( str_a: str , str_b: str ):
    """Compute the Jaro-Winkler similarity of two strings.

    Returns a float in ``[0.0, 1.0]``; ``1.0`` means the strings are equal.
    (The previous signature repeated one parameter name, which is a
    SyntaxError in Python; the two inputs are now ``str_a`` and ``str_b``.)

    >>> round(SCREAMING_SNAKE_CASE_("martha", "marhta"), 10)
    0.9611111111
    """

    def get_matched_characters(_stra: str, _strb: str) -> str:
        # Characters of _stra that also occur in _strb within the Jaro
        # matching window (half the length of the shorter string).
        matched = []
        limit = min(len(_stra), len(_strb)) // 2
        for i, l in enumerate(_stra):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_strb)))
            if l in _strb[left:right]:
                matched.append(l)
                # Blank out the matched character so it cannot match twice.
                _strb = f"{_strb[0:_strb.index(l)]} {_strb[_strb.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_a = get_matched_characters(str_a, str_b)
    matching_b = get_matched_characters(str_b, str_a)
    match_count = len(matching_a)

    # transposition: matched characters appearing in a different order
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b) if ca != cb]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str_a)
                + match_count / len(str_b)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters (the Winkler boost)
    prefix_len = 0
    for ca, cb in zip(str_a[:4], str_b[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # The similarity function in this module is ``SCREAMING_SNAKE_CASE_``
    # (the original call to an undefined ``jaro_winkler`` raised NameError).
    print(SCREAMING_SNAKE_CASE_("hello", "world"))
| 666 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure for the LiLT model: maps submodule name -> exported names.
UpperCAmelCase__ : Optional[Any] = {
    """configuration_lilt""": ["""LILT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LiltConfig"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Register the torch modeling exports under their own key instead of
    # rebinding the whole mapping (the previous rebinding discarded the
    # configuration entries).
    UpperCAmelCase__["""modeling_lilt"""] = [
        """LILT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """LiltForQuestionAnswering""",
        """LiltForSequenceClassification""",
        """LiltForTokenClassification""",
        """LiltModel""",
        """LiltPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules are only
    # imported on first attribute access (previously the ``_LazyModule`` was
    # built from an undefined name and never installed in ``sys.modules``).
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], UpperCAmelCase__, module_spec=__spec__)
| 446 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class a__ ( UpperCAmelCase ):
    """Tests for ``RagRetriever`` over canonical, custom and legacy FAISS indexes.

    NOTE(review): throughout this class, values are bound to bare locals
    (``SCREAMING_SNAKE_CASE``) where instance attributes such as
    ``self.tmpdirname``, ``self.retrieval_vector_size``, ``self.vocab_file``
    or ``self.merges_file`` appear to be intended — later code reads those
    attributes but nothing visibly sets them. Confirm against upstream.
    """

    def _lowercase ( self : Optional[int] ) ->Optional[int]:
        """Build throwaway DPR (WordPiece) and BART (BPE) tokenizer fixtures on disk."""
        SCREAMING_SNAKE_CASE : List[Any] = tempfile.mkdtemp()
        SCREAMING_SNAKE_CASE : Optional[Any] = 8
        # DPR tok
        SCREAMING_SNAKE_CASE : List[str] = [
            """[UNK]""",
            """[CLS]""",
            """[SEP]""",
            """[PAD]""",
            """[MASK]""",
            """want""",
            """##want""",
            """##ed""",
            """wa""",
            """un""",
            """runn""",
            """##ing""",
            """,""",
            """low""",
            """lowest""",
        ]
        SCREAMING_SNAKE_CASE : Any = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
        os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
        SCREAMING_SNAKE_CASE : Any = os.path.join(UpperCAmelCase__ , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        # BART tok
        SCREAMING_SNAKE_CASE : List[str] = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """\u0120""",
            """\u0120l""",
            """\u0120n""",
            """\u0120lo""",
            """\u0120low""",
            """er""",
            """\u0120lowest""",
            """\u0120newer""",
            """\u0120wider""",
            """<unk>""",
        ]
        SCREAMING_SNAKE_CASE : List[Any] = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
        SCREAMING_SNAKE_CASE : Dict = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        SCREAMING_SNAKE_CASE : List[Any] = {"""unk_token""": """<unk>"""}
        SCREAMING_SNAKE_CASE : int = os.path.join(self.tmpdirname , """bart_tokenizer""" )
        os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
        SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(UpperCAmelCase__ , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
        SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(UpperCAmelCase__ , BART_VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(UpperCAmelCase__ ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(UpperCAmelCase__ ) )

    def _lowercase ( self : Optional[Any] ) ->DPRQuestionEncoderTokenizer:
        """Load the DPR question-encoder tokenizer fixture from the temp dir."""
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )

    def _lowercase ( self : Optional[Any] ) ->DPRContextEncoderTokenizer:
        """Load the DPR context-encoder tokenizer fixture from the temp dir."""
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )

    def _lowercase ( self : Optional[int] ) ->BartTokenizer:
        """Load the BART tokenizer fixture from the temp dir."""
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )

    def _lowercase ( self : Any ) ->List[str]:
        """Remove the on-disk fixtures created for the test."""
        shutil.rmtree(self.tmpdirname )

    def _lowercase ( self : Tuple ) ->Union[str, Any]:
        """Return a 2-row dataset with a flat inner-product FAISS index on ``embeddings``."""
        SCREAMING_SNAKE_CASE : str = Dataset.from_dict(
            {
                """id""": ["""0""", """1"""],
                """text""": ["""foo""", """bar"""],
                """title""": ["""Foo""", """Bar"""],
                """embeddings""": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
            } )
        dataset.add_faiss_index("""embeddings""" , string_factory="""Flat""" , metric_type=faiss.METRIC_INNER_PRODUCT )
        return dataset

    def _lowercase ( self : Union[str, Any] ) ->Tuple:
        """Build a ``RagRetriever`` over the canonical HF index, mocking dataset loading."""
        SCREAMING_SNAKE_CASE : int = self.get_dummy_dataset()
        SCREAMING_SNAKE_CASE : str = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
        with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset:
            SCREAMING_SNAKE_CASE : Tuple = dataset
            SCREAMING_SNAKE_CASE : Union[str, Any] = RagRetriever(
                UpperCAmelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        return retriever

    def _lowercase ( self : List[Any] , UpperCAmelCase__ : bool ) ->Union[str, Any]:
        """Build a ``RagRetriever`` over a custom HF index, in memory or from disk."""
        SCREAMING_SNAKE_CASE : Any = self.get_dummy_dataset()
        SCREAMING_SNAKE_CASE : Dict = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="""custom""" , )
        if from_disk:
            SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , """dataset""" )
            SCREAMING_SNAKE_CASE : Any = os.path.join(self.tmpdirname , """index.faiss""" )
            dataset.get_index("""embeddings""" ).save(os.path.join(self.tmpdirname , """index.faiss""" ) )
            # The FAISS index must be dropped before the dataset itself is serialized.
            dataset.drop_index("""embeddings""" )
            dataset.save_to_disk(os.path.join(self.tmpdirname , """dataset""" ) )
            del dataset
            SCREAMING_SNAKE_CASE : Any = RagRetriever(
                UpperCAmelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        else:
            SCREAMING_SNAKE_CASE : Union[str, Any] = RagRetriever(
                UpperCAmelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , UpperCAmelCase__ ) , )
        return retriever

    def _lowercase ( self : int ) ->int:
        """Build a ``RagRetriever`` over a legacy-format index serialized to disk."""
        SCREAMING_SNAKE_CASE : Optional[Any] = Dataset.from_dict(
            {
                """id""": ["""0""", """1"""],
                """text""": ["""foo""", """bar"""],
                """title""": ["""Foo""", """Bar"""],
                """embeddings""": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
            } )
        dataset.add_faiss_index("""embeddings""" , string_factory="""Flat""" , metric_type=faiss.METRIC_INNER_PRODUCT )
        SCREAMING_SNAKE_CASE : Any = os.path.join(self.tmpdirname , """hf_bert_base.hnswSQ8_correct_phi_128.c_index""" )
        dataset.save_faiss_index("""embeddings""" , index_file_name + """.index.dpr""" )
        pickle.dump(dataset["""id"""] , open(index_file_name + """.index_meta.dpr""" , """wb""" ) )
        SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.tmpdirname , """psgs_w100.tsv.pkl""" )
        SCREAMING_SNAKE_CASE : Optional[Any] = {sample["""id"""]: [sample["""text"""], sample["""title"""]] for sample in dataset}
        pickle.dump(UpperCAmelCase__ , open(UpperCAmelCase__ , """wb""" ) )
        SCREAMING_SNAKE_CASE : str = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="""legacy""" , index_path=self.tmpdirname , )
        SCREAMING_SNAKE_CASE : Any = RagRetriever(
            UpperCAmelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
        return retriever

    def _lowercase ( self : Dict ) ->Optional[int]:
        """``retrieve`` on the canonical index returns the max-inner-product docs."""
        SCREAMING_SNAKE_CASE : Any = 1
        SCREAMING_SNAKE_CASE : str = self.get_dummy_canonical_hf_index_retriever()
        SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = retriever.retrieve(UpperCAmelCase__ , n_docs=UpperCAmelCase__ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(UpperCAmelCase__ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
        self.assertEqual(len(doc_dicts[0]["""id"""] ) , UpperCAmelCase__ )
        self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def _lowercase ( self : List[str] ) ->int:
        """Canonical-index retriever survives a save/load round trip."""
        SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset:
                SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_dataset()
                retriever.save_pretrained(UpperCAmelCase__ )
                SCREAMING_SNAKE_CASE : List[Any] = RagRetriever.from_pretrained(UpperCAmelCase__ )
                self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
                SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(
                    [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
                SCREAMING_SNAKE_CASE : Dict = retriever.retrieve(UpperCAmelCase__ , n_docs=1 )
                self.assertTrue(out is not None )

    def _lowercase ( self : List[Any] ) ->List[str]:
        """``retrieve`` on an in-memory custom index returns the expected docs."""
        SCREAMING_SNAKE_CASE : Tuple = 1
        SCREAMING_SNAKE_CASE : str = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase__ )
        SCREAMING_SNAKE_CASE : str = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = retriever.retrieve(UpperCAmelCase__ , n_docs=UpperCAmelCase__ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(UpperCAmelCase__ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
        self.assertEqual(len(doc_dicts[0]["""id"""] ) , UpperCAmelCase__ )
        self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def _lowercase ( self : Optional[int] ) ->Optional[Any]:
        """In-memory custom-index retriever survives a save/load round trip."""
        SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase__ )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(UpperCAmelCase__ )
            SCREAMING_SNAKE_CASE : Optional[Any] = RagRetriever.from_pretrained(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
            SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            SCREAMING_SNAKE_CASE : int = retriever.retrieve(UpperCAmelCase__ , n_docs=1 )
            self.assertTrue(out is not None )

    def _lowercase ( self : Dict ) ->Tuple:
        """``retrieve`` on an on-disk custom index returns the expected docs."""
        SCREAMING_SNAKE_CASE : Any = 1
        SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase__ )
        SCREAMING_SNAKE_CASE : Optional[Any] = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = retriever.retrieve(UpperCAmelCase__ , n_docs=UpperCAmelCase__ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(UpperCAmelCase__ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
        self.assertEqual(len(doc_dicts[0]["""id"""] ) , UpperCAmelCase__ )
        self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def _lowercase ( self : Union[str, Any] ) ->Any:
        """On-disk custom-index retriever survives a save/load round trip."""
        SCREAMING_SNAKE_CASE : Dict = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase__ )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(UpperCAmelCase__ )
            SCREAMING_SNAKE_CASE : str = RagRetriever.from_pretrained(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
            SCREAMING_SNAKE_CASE : Optional[int] = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            SCREAMING_SNAKE_CASE : Optional[Any] = retriever.retrieve(UpperCAmelCase__ , n_docs=1 )
            self.assertTrue(out is not None )

    def _lowercase ( self : Tuple ) ->str:
        """``retrieve`` on the legacy index returns only text/title doc fields."""
        SCREAMING_SNAKE_CASE : Tuple = 1
        SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_legacy_index_retriever()
        SCREAMING_SNAKE_CASE : int = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = retriever.retrieve(UpperCAmelCase__ , n_docs=UpperCAmelCase__ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(UpperCAmelCase__ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ["""text""", """title"""] )
        self.assertEqual(len(doc_dicts[0]["""text"""] ) , UpperCAmelCase__ )
        self.assertEqual(doc_dicts[0]["""text"""][0] , """bar""" )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["""text"""][0] , """foo""" )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def _lowercase ( self : Any ) ->str:
        """Legacy-index retriever survives a save/load round trip."""
        SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(UpperCAmelCase__ )
            SCREAMING_SNAKE_CASE : Union[str, Any] = RagRetriever.from_pretrained(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
            SCREAMING_SNAKE_CASE : str = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            SCREAMING_SNAKE_CASE : Dict = retriever.retrieve(UpperCAmelCase__ , n_docs=1 )
            self.assertTrue(out is not None )

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def _lowercase ( self : Tuple ) ->Tuple:
        """Calling the retriever returns numpy arrays by default and torch tensors with ``return_tensors='pt'``."""
        import torch

        SCREAMING_SNAKE_CASE : int = 1
        SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_canonical_hf_index_retriever()
        SCREAMING_SNAKE_CASE : str = [[5, 7], [1_0, 1_1]]
        SCREAMING_SNAKE_CASE : Tuple = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        SCREAMING_SNAKE_CASE : int = retriever(UpperCAmelCase__ , UpperCAmelCase__ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase__ )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = (
            out["""context_input_ids"""],
            out["""context_attention_mask"""],
            out["""retrieved_doc_embeds"""],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
        self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
        SCREAMING_SNAKE_CASE : Union[str, Any] = retriever(
            UpperCAmelCase__ , UpperCAmelCase__ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase__ , return_tensors="""pt""" , )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = (  # noqa: F841
            out["""context_input_ids"""],
            out["""context_attention_mask"""],
            out["""retrieved_doc_embeds"""],
            out["""doc_ids"""],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
        self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
        self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def _lowercase ( self : Union[str, Any] ) ->Optional[Any]:
        """With a context-encoder tokenizer set, the output gains tokenized-doc keys."""
        SCREAMING_SNAKE_CASE : Dict = self.get_dpr_ctx_encoder_tokenizer()
        SCREAMING_SNAKE_CASE : Optional[int] = 1
        SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase__ )
        retriever.set_ctx_encoder_tokenizer(UpperCAmelCase__ )
        SCREAMING_SNAKE_CASE : List[str] = [[5, 7], [1_0, 1_1]]
        SCREAMING_SNAKE_CASE : Optional[int] = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        SCREAMING_SNAKE_CASE : Union[str, Any] = retriever(UpperCAmelCase__ , UpperCAmelCase__ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase__ )
        self.assertEqual(
            len(UpperCAmelCase__ ) , 6 )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("""tokenized_doc_ids""", """tokenized_doc_attention_mask""") ) , UpperCAmelCase__ )  # check for doc token related keys in dictionary.
| 446 | 1 |
import mpmath # for roots of unity
import numpy as np
class __A :
    """Multiply two polynomials with the fast Fourier transform.

    ``poly_a`` and ``poly_b`` are coefficient lists (index = power of x).
    The product coefficients are computed in O(n log n) and stored in
    ``self.product`` as complex numbers rounded to 8 decimal places.
    """

    def __init__( self , poly_a=None , poly_b=None):
        """Normalize, pad and multiply the two input polynomials."""
        # Copy so the caller's lists are never mutated.
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2 large enough to hold the
        # product (degree len_A + len_B - 1).
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A primitive c_max_length-th root of unity used for the transform
        # (e^(2*pi*i/n), computed with numpy instead of the mpmath dependency).
        self.root = complex(np.exp(2j * np.pi / self.c_max_length))

        # The product
        self.product = self.__multiply()

    def __dft( self , which):
        """Iterative radix-2 DFT of polynomial ``which`` ("A" or "B")."""
        if which == "A":
            dft = [[coefficient] for coefficient in self.polyA]
        else:
            dft = [[coefficient] for coefficient in self.polyB]
        # Corner case: a single point is its own transform.
        if len(dft) <= 1:
            return dft[0]
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for _ in range(next_ncol)]
            root = self.root**next_ncol
            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply( self):
        """Pointwise-multiply the two transforms, then invert the DFT."""
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b
        # Corner Case: a single point needs no inverse transform.
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for _ in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2)
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root))
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack; rounding to 8 decimals removes floating-point noise.
        inverce_c = [round(x[0].real , 8) + round(x[0].imag , 8) * 1j for x in inverce_c]
        # Remove trailing 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__( self):
        """Human-readable rendering of A, B and their product."""
        a = 'A = ' + ' + '.join(
            f"""{coef}*x^{i}""" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = 'B = ' + ' + '.join(
            f"""{coef}*x^{i}""" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = 'A*B = ' + ' + '.join(
            f"""{coef}*x^{i}""" for i, coef in enumerate(self.product))
        return f"""{a}\n{b}\n{c}"""
# Unit tests
if __name__ == "__main__":
    import doctest

    # Run any doctests defined in this module when executed as a script.
    doctest.testmod()
| 114 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("""TEST_SAGEMAKER""" ,"""False""" ) ) is not True ,reason="""Skipping test because should only be run when releasing minor transformers version""" ,)
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
    [
        {
            """framework""": """pytorch""",
            """script""": """run_glue.py""",
            """model_name_or_path""": """distilbert-base-cased""",
            """instance_type""": """ml.g4dn.xlarge""",
            """results""": {"""train_runtime""": 650, """eval_accuracy""": 0.6, """eval_loss""": 0.9},
        },
        {
            """framework""": """tensorflow""",
            """script""": """run_tf.py""",
            """model_name_or_path""": """distilbert-base-cased""",
            """instance_type""": """ml.g4dn.xlarge""",
            """results""": {"""train_runtime""": 600, """eval_accuracy""": 0.3, """eval_loss""": 0.9},
        },
    ] )
class __A ( unittest.TestCase ):
    """Single-node SageMaker training smoke tests, parameterized per framework.

    Class attributes (``framework``, ``script``, ``instance_type``,
    ``results``, …) are injected by ``@parameterized_class``; the ``env``
    attribute comes from the ``sm_env`` pytest fixture.
    """

    def __snake_case ( self):
        """Copy the framework-specific example script into the test path."""
        if self.framework == "pytorch":
            subprocess.run(
                F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=a__ , )
        # NOTE(review): ``a__`` above is not defined in this method — it looks
        # like a mangled ``True``/keyword value; confirm against upstream.
        assert hasattr(self , '''env''')

    def __snake_case ( self , a__=1):
        """Build a HuggingFace SageMaker estimator for ``a__`` instances."""
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=a__ , instance_type=self.instance_type , debugger_hook_config=a__ , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , )

    def __snake_case ( self , a__):
        """Export the metrics of training job ``a__`` to a CSV in the test path."""
        TrainingJobAnalytics(a__).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""")
        # NOTE(review): ``job_name`` is not defined here — presumably the
        # parameter ``a__`` was meant; confirm.

    def __snake_case ( self):
        """Run a training job and assert runtime/accuracy/loss against ``self.results``.

        NOTE(review): this method calls ``self.create_estimator`` and reads
        ``train_runtime``/``eval_accuracy``/``eval_loss``, none of which are
        defined under those names in this class — the values are bound to
        ``_lowerCamelCase`` locals instead; confirm against upstream.
        """
        _lowerCamelCase : Dict = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        _lowerCamelCase : Tuple = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        _lowerCamelCase : Tuple = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''])
        _lowerCamelCase : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        _lowerCamelCase : Optional[Any] = (
            Session().describe_training_job(estimator.latest_training_job.name).get('''TrainingTimeInSeconds''' , 99_9999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy)
        assert all(t <= self.results['''eval_loss'''] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(F"""{estimator.latest_training_job.name}.json""" , '''w''') as outfile:
            json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , a__)
| 114 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)

# Canonical config URLs for the released BigBird checkpoints.
# NOTE(review): this second assignment rebinds ``lowerCAmelCase__`` and so
# shadows the logger created on the line above — the two values were
# presumably meant to use distinct names; confirm against callers.
lowerCAmelCase__ = {
    'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
    'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
    'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class _A ( UpperCamelCase ):
    """Configuration for BigBird models (``model_type='big_bird'``).

    Defaults mirror the base released checkpoint. The previous ``__init__``
    repeated the parameter name ``lowerCamelCase`` for every argument (a
    SyntaxError) and bound the values to throwaway locals; the parameters
    now carry the names the body already referenced, and each value is
    stored on ``self`` so the config is actually populated.
    """

    _lowercase = 'big_bird'

    def __init__(
        self,
        vocab_size=50_358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4_096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    )-> None:
        """Store the hyper-parameters; special-token ids go to the base class."""
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class _A ( UpperCamelCase ):
    """ONNX export configuration for BigBird."""

    @property
    def __lowerCAmelCase ( self : Optional[Any] )-> Mapping[str, Mapping[int, str]]:
        """Dynamic-axes mapping for the exported model's inputs.

        Multiple-choice tasks carry an extra ``choice`` axis; everything else
        uses plain (batch, sequence) inputs. The axis dict is now bound to
        ``dynamic_axis`` — previously it was assigned to a throwaway name and
        the ``return`` referenced the undefined ``dynamic_axis`` (NameError).
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
| 172 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class _A ( UpperCamelCase ):
    """Output container for the Flax ControlNet (a ``flax.struct`` dataclass).

    NOTE(review): both fields below are the literal ``42`` assigned to the
    same name, so the second shadows the first — upstream these are
    presumably two distinct annotated array fields; confirm before use.
    """

    _lowercase = 42
    _lowercase = 42
class _A ( nn.Module ):
    """Flax module embedding a conditioning image into feature space.

    A conv-in layer, a stack of 3x3 convs (every second one downsampling by
    stride 2 through ``block_out_channels``), and a zero-initialized conv-out.

    NOTE(review): ``setup`` binds its layers to throwaway locals while
    ``__call__`` reads ``self.conv_in`` / ``self.blocks`` / ``self.conv_out``
    — those attributes are never assigned here; confirm against upstream.
    """

    # Number of output channels of the final projection.
    _lowercase = 42
    # Channel widths of the intermediate conv stack.
    _lowercase = (16, 32, 96, 256)
    # Parameter dtype for all layers.
    _lowercase = jnp.floataa

    def __lowerCAmelCase ( self : Tuple )-> Any:
        """Construct conv-in, the intermediate conv stack, and conv-out."""
        snake_case__ : Union[str, Any] = nn.Conv(
            self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        snake_case__ : List[str] = []
        for i in range(len(self.block_out_channels ) - 1 ):
            snake_case__ : List[str] = self.block_out_channels[i]
            snake_case__ : Union[str, Any] = self.block_out_channels[i + 1]
            # Same-resolution conv followed by a stride-2 downsampling conv.
            snake_case__ : Optional[int] = nn.Conv(
                lowerCamelCase , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(lowerCamelCase )
            snake_case__ : int = nn.Conv(
                lowerCamelCase , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(lowerCamelCase )
        snake_case__ : Any = blocks
        # Zero-initialized so the embedding initially contributes nothing.
        snake_case__ : Union[str, Any] = nn.Conv(
            self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )

    def __call__( self : Union[str, Any] , lowerCamelCase : Any )-> Tuple:
        """Apply conv-in, the SiLU-interleaved conv stack, then conv-out."""
        snake_case__ : int = self.conv_in(lowerCamelCase )
        snake_case__ : Dict = nn.silu(lowerCamelCase )
        for block in self.blocks:
            snake_case__ : Dict = block(lowerCamelCase )
            snake_case__ : str = nn.silu(lowerCamelCase )
        snake_case__ : Union[str, Any] = self.conv_out(lowerCamelCase )
        return embedding
@flax_register_to_config
class _A ( nn.Module , UpperCamelCase , UpperCamelCase ):
    # Flax ControlNet-style model: UNet encoder (down blocks + mid block), each
    # resolution tapped by a zero-initialised 1x1 "controlnet block", plus a
    # conditioning-image embedder whose output is added to the input sample.
    # NOTE(review): identifiers in this class were mechanically mangled; the
    # code below is byte-identical to the original, only comments were added.
    # The repeated `_lowercase` class attributes clobber one another and the
    # bare `lowerCamelCase` references are unresolved — restore the original
    # names before executing this module.
    '''simple docstring'''
    _lowercase = 32
    _lowercase = 4
    _lowercase = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    _lowercase = False
    _lowercase = (320, 640, 1280, 1280)
    _lowercase = 2
    _lowercase = 8
    _lowercase = None
    _lowercase = 1280
    _lowercase = 0.0
    _lowercase = False
    _lowercase = jnp.floataa
    _lowercase = True
    _lowercase = 0
    _lowercase = "rgb"
    _lowercase = (16, 32, 96, 256)

    def __lowerCAmelCase ( self : Optional[int] , lowerCamelCase : jax.random.KeyArray )-> FrozenDict:
        # Build dummy sample / timestep / encoder-state / conditioning tensors
        # and run `self.init` once to materialise the parameter PyTree.
        # init input tensors
        snake_case__ : Union[str, Any] = (1, self.in_channels, self.sample_size, self.sample_size)
        snake_case__ : Dict = jnp.zeros(lowerCamelCase , dtype=jnp.floataa )
        snake_case__ : str = jnp.ones((1,) , dtype=jnp.intaa )
        snake_case__ : str = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
        # Conditioning image is 8x the latent sample size (presumably the VAE
        # downscale factor — confirm against the pipeline).
        snake_case__ : Dict = (1, 3, self.sample_size * 8, self.sample_size * 8)
        snake_case__ : Optional[Any] = jnp.zeros(lowerCamelCase , dtype=jnp.floataa )
        snake_case__ , snake_case__ : List[Any] = jax.random.split(lowerCamelCase )
        snake_case__ : Optional[Any] = {"""params""": params_rng, """dropout""": dropout_rng}
        return self.init(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )["params"]

    def __lowerCAmelCase ( self : int )-> int:
        # Flax `setup`: instantiate all submodules.
        snake_case__ : List[Any] = self.block_out_channels
        snake_case__ : Union[str, Any] = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        snake_case__ : Any = self.num_attention_heads or self.attention_head_dim
        # input
        snake_case__ : Any = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        snake_case__ : List[str] = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        snake_case__ : Union[str, Any] = FlaxTimestepEmbedding(lowerCamelCase , dtype=self.dtype )
        snake_case__ : Tuple = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
        # Broadcast scalar config values to one entry per down block.
        snake_case__ : Union[str, Any] = self.only_cross_attention
        if isinstance(lowerCamelCase , lowerCamelCase ):
            snake_case__ : Dict = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(lowerCamelCase , lowerCamelCase ):
            snake_case__ : Union[str, Any] = (num_attention_heads,) * len(self.down_block_types )
        # down
        snake_case__ : Optional[Any] = []
        snake_case__ : str = []
        snake_case__ : Tuple = block_out_channels[0]
        # Zero-initialised 1x1 conv so each ControlNet tap starts contributing zero.
        snake_case__ : Tuple = nn.Conv(
            lowerCamelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
        controlnet_down_blocks.append(lowerCamelCase )
        for i, down_block_type in enumerate(self.down_block_types ):
            snake_case__ : Optional[int] = output_channel
            snake_case__ : int = block_out_channels[i]
            snake_case__ : Union[str, Any] = i == len(lowerCamelCase ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                snake_case__ : int = FlaxCrossAttnDownBlockaD(
                    in_channels=lowerCamelCase , out_channels=lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
            else:
                snake_case__ : Any = FlaxDownBlockaD(
                    in_channels=lowerCamelCase , out_channels=lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(lowerCamelCase )
            # One zero-init 1x1 tap per resnet layer of the down block...
            for _ in range(self.layers_per_block ):
                snake_case__ : str = nn.Conv(
                    lowerCamelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(lowerCamelCase )
            # ...plus one for the downsampler of every non-final block.
            if not is_final_block:
                snake_case__ : Tuple = nn.Conv(
                    lowerCamelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(lowerCamelCase )
        snake_case__ : Optional[Any] = down_blocks
        snake_case__ : List[str] = controlnet_down_blocks
        # mid
        snake_case__ : Union[str, Any] = block_out_channels[-1]
        snake_case__ : Optional[int] = FlaxUNetMidBlockaDCrossAttn(
            in_channels=lowerCamelCase , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
        snake_case__ : Tuple = nn.Conv(
            lowerCamelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )

    def __call__( self : Optional[int] , lowerCamelCase : int , lowerCamelCase : List[Any] , lowerCamelCase : Dict , lowerCamelCase : Optional[int] , lowerCamelCase : float = 1.0 , lowerCamelCase : bool = True , lowerCamelCase : bool = False , )-> Union[FlaxControlNetOutput, Tuple]:
        # Forward pass: returns the per-resolution down-block residuals plus the
        # mid-block residual, each scaled by the conditioning scale.
        snake_case__ : int = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            # BGR conditioning images are flipped to RGB on the channel axis.
            snake_case__ : Union[str, Any] = jnp.flip(lowerCamelCase , axis=1 )
        # 1. time — normalise scalars / 0-d arrays into a 1-d timestep batch
        if not isinstance(lowerCamelCase , jnp.ndarray ):
            snake_case__ : List[Any] = jnp.array([timesteps] , dtype=jnp.intaa )
        elif isinstance(lowerCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
            snake_case__ : Optional[int] = timesteps.astype(dtype=jnp.floataa )
            snake_case__ : Optional[int] = jnp.expand_dims(lowerCamelCase , 0 )
        snake_case__ : Any = self.time_proj(lowerCamelCase )
        snake_case__ : List[Any] = self.time_embedding(lowerCamelCase )
        # 2. pre-process (NCHW -> NHWC for Flax convolutions)
        snake_case__ : Dict = jnp.transpose(lowerCamelCase , (0, 2, 3, 1) )
        snake_case__ : Any = self.conv_in(lowerCamelCase )
        snake_case__ : Dict = jnp.transpose(lowerCamelCase , (0, 2, 3, 1) )
        snake_case__ : str = self.controlnet_cond_embedding(lowerCamelCase )
        sample += controlnet_cond
        # 3. down
        snake_case__ : Any = (sample,)
        for down_block in self.down_blocks:
            if isinstance(lowerCamelCase , lowerCamelCase ):
                snake_case__ , snake_case__ : List[str] = down_block(lowerCamelCase , lowerCamelCase , lowerCamelCase , deterministic=not train )
            else:
                snake_case__ , snake_case__ : List[str] = down_block(lowerCamelCase , lowerCamelCase , deterministic=not train )
            down_block_res_samples += res_samples
        # 4. mid
        snake_case__ : Optional[Any] = self.mid_block(lowerCamelCase , lowerCamelCase , lowerCamelCase , deterministic=not train )
        # 5. contronet blocks — apply the zero-init 1x1 taps to each residual
        snake_case__ : List[str] = ()
        for down_block_res_sample, controlnet_block in zip(lowerCamelCase , self.controlnet_down_blocks ):
            snake_case__ : List[str] = controlnet_block(lowerCamelCase )
            controlnet_down_block_res_samples += (down_block_res_sample,)
        snake_case__ : Optional[Any] = controlnet_down_block_res_samples
        snake_case__ : Optional[int] = self.controlnet_mid_block(lowerCamelCase )
        # 6. scaling
        snake_case__ : Optional[Any] = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=lowerCamelCase , mid_block_res_sample=lowerCamelCase )
| 172 | 1 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.ndarray:
    """Decode an encoded audio byte payload to a mono float32 waveform via ffmpeg.

    The original signature declared two parameters both named ``_a`` (a
    SyntaxError) and referenced undefined names; this restores working names.

    Args:
        bpayload: Raw encoded audio bytes (any container/codec ffmpeg supports).
        sampling_rate: Sampling rate the output waveform is resampled to.

    Returns:
        1-D ``np.float32`` array of audio samples.

    Raises:
        ValueError: If ffmpeg is missing or the payload decodes to nothing.
    """
    ar = f"{sampling_rate}"
    ac = "1"  # force mono output
    format_for_conversion = "f32le"  # raw little-endian float32 PCM on stdout
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        # Feed the payload on stdin and collect the decoded PCM from stdout.
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def ffmpeg_microphone(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = "f32le"):
    """Yield raw audio chunks captured from the default microphone via ffmpeg.

    Restored from a mangled signature (all parameters were named ``_a``) so the
    body's references and the sibling caller (``ffmpeg_microphone_live``) resolve.

    Args:
        sampling_rate: Capture sampling rate in Hz.
        chunk_length_s: Length of each yielded chunk, in seconds.
        format_for_conversion: ``"s16le"`` (int16) or ``"f32le"`` (float32) PCM.

    Yields:
        ``bytes`` objects of ``chunk_length_s`` worth of PCM samples.
    """
    ar = f"{sampling_rate}"
    ac = "1"  # mono capture
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    # Pick the platform-specific capture backend and device.
    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    # Bytes per chunk = samples per chunk * bytes per sample.
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s=None,
    stride_length_s=None,
    format_for_conversion: str = "f32le",
):
    """Stream striped microphone chunks as numpy arrays, skipping if we fall behind.

    Restored from a mangled signature (all parameters were named ``_a``). Wraps
    ``ffmpeg_microphone`` and re-chunks its output with left/right strides via
    ``chunk_bytes_iter``.

    Args:
        sampling_rate: Capture sampling rate in Hz.
        chunk_length_s: Logical chunk length in seconds.
        stream_chunk_s: If set, the (smaller) capture granularity in seconds.
        stride_length_s: Stride in seconds — a single number or a
            ``[left, right]`` pair; defaults to ``chunk_length_s / 6``.
        format_for_conversion: ``"s16le"`` or ``"f32le"`` PCM.

    Yields:
        Dicts with ``raw`` (np.ndarray), ``stride`` (samples), ``sampling_rate``
        and, for partial chunks, ``partial``.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride, stream: bool = False):
    """Re-chunk a byte iterator into overlapping windows of ``chunk_len`` bytes.

    Restored from a mangled signature (all parameters were named ``_a``).

    Args:
        iterator: Iterable of ``bytes`` fragments.
        chunk_len: Window size in bytes.
        stride: ``(stride_left, stride_right)`` overlap in bytes; the first
            chunk has a left stride of 0.
        stream: When True, also emit partially-filled chunks (flagged with
            ``"partial": True``) as data trickles in.

    Yields:
        Dicts with ``raw`` (bytes) and ``stride`` (and ``partial`` when
        ``stream`` is True).

    Raises:
        ValueError: If the strides don't leave room for fresh data.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0  # the very first chunk has no left context
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                # Keep the overlap region for the next window.
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Run an ffmpeg command and yield its stdout in ``buflen``-byte reads.

    Restored from a mangled signature (both parameters were named ``_a``).

    Raises:
        ValueError: If the ffmpeg binary is not installed.
    """
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 568 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class _UpperCamelCase ( TaskTemplate ):
    """``image-classification`` task template.

    Maps a dataset's image/label columns onto the canonical
    ``{"image": Image()} -> {"labels": ClassLabel}`` schema. The original had
    an undefined decorator argument and base class, and all five class
    attributes shared one mangled name; the names below are the ones the
    methods actually read (``self.task`` etc.), restoring working behavior.
    """

    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    # ClassLabel is intentionally the class (not an instance); it is replaced
    # with the dataset's concrete label feature in `align_with_features`.
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the dataset's
        actual ClassLabel feature for ``self.label_column``.

        Raises:
            ValueError: If the column is missing or is not a ClassLabel.
        """
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # Frozen dataclass: write through __dict__ instead of attribute assignment.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Mapping from the dataset's column names to the canonical ones."""
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 636 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module-level logger.
logger = logging.get_logger(__name__)

# Filename expected inside a saved tokenizer directory.
# (The four constants below previously all shared one mangled name, shadowing
# each other; these are the names the tokenizer class actually references.)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

# Download locations of the pretrained sentencepiece vocabularies.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

# Maximum input length per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}
class _A( PreTrainedTokenizer ):
    """SentencePiece tokenizer for BertGeneration (vocab has no BOS piece).

    The original had an undefined base class, a constructor whose parameters
    were all named ``_A`` (a SyntaxError), and every method sharing one mangled
    name; the names below are the ``PreTrainedTokenizer`` override points.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        # Size of the underlying sentencepiece model.
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        """Return token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePieceProcessor is not picklable; drop it and reload on setstate.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        """Tokenize a string into sentencepiece pieces."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the sentencepiece vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Convert an id (int) to a token (str) using the sentencepiece vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Join pieces back into a string; special tokens bypass sentencepiece decoding."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None):
        """Copy (or re-serialize) the sentencepiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 708 |
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    """Generate a 1024-bit RSA key pair and write it to ``rsa_*.txt`` files.

    Restored name: the ``__main__`` guard and this body reference ``main`` /
    ``make_key_files``, which the mangled names left undefined.
    """
    print('Making key files...')
    make_key_files('rsa', 1024)
    print('Key files generation successful.')
def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
    """Generate an RSA key pair of roughly ``key_size`` bits.

    Returns:
        ``((n, e), (n, d))`` — the public and private keys.
    """
    print('Generating prime p...')
    p = rabinMiller.generate_large_prime(key_size)
    print('Generating prime q...')
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print('Generating e that is relatively prime to (p - 1) * (q - 1)...')
    while True:
        # Keep drawing candidates until e is coprime with phi(n).
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print('Calculating d that is mod inverse of e...')
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
def make_key_files(name: str, key_size: int) -> None:
    """Write ``{name}_pubkey.txt`` / ``{name}_privkey.txt``, refusing to overwrite.

    The original declared both parameters as ``a`` (a SyntaxError); restored.
    """
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print('\nWARNING:')
        print(
            f"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"
            'Use a different name or delete these files and re-run this program.'
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", 'w') as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", 'w') as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


if __name__ == "__main__":
    main()
| 77 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class lowerCAmelCase__ ( ABC ):
    """Abstract base for path-based dataset readers; stores construction options.

    The original inherited from an undefined mangled name; `ABC` (already
    imported) is the intended base. Project-type annotations are written as
    strings since they come from sibling modules.
    NOTE(review): a second class later in this module reuses this same mangled
    class name and shadows it — rename both when de-obfuscating.
    """

    def __init__(
        self,
        path_or_paths: "Optional[NestedDataStructureLike[PathLike]]" = None,
        split: "Optional[NamedSplit]" = None,
        features: "Optional[Features]" = None,
        cache_dir: "Optional[str]" = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: "Optional[int]" = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        # Dict inputs already map split names to paths, so keep the split as
        # given; otherwise default to "train".
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> "Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]":
        """Read and return the dataset (implemented by concrete readers)."""
        pass
class lowerCAmelCase__ ( ABC ):
    """Abstract base for input-stream dataset readers (no path argument).

    The original inherited from an undefined mangled name; `ABC` (already
    imported) is the intended base.
    NOTE(review): this class shares its mangled name with the path-based
    reader above and shadows it at module level — rename when de-obfuscating.
    """

    def __init__(
        self,
        features: "Optional[Features]" = None,
        cache_dir: "Optional[str]" = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: "Optional[int]" = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> "Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]":
        """Read and return the dataset (implemented by concrete readers)."""
        pass
"""simple docstring"""
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the fractional part of ``number``.

    The original declared both parameters as ``A`` (a SyntaxError) and the
    ``__main__`` demo already calls ``decimal_isolate``; restored.

    Args:
        number: Value whose fractional part is wanted (sign is preserved).
        digit_amount: If > 0, round the fractional part to that many digits;
            otherwise return it unrounded.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCamelCase ( SchedulerCommonTest ):
    """Unit tests for ``DDPMScheduler``: config options, variance values,
    full denoising loops and custom-timestep handling.

    The original gave every test method the same mangled name ``a__`` (so only
    the last would survive / be collected) and ``get_scheduler_config`` built
    its dict in one variable but updated/returned the undefined ``config``;
    both defects are fixed here. The base class is the imported
    ``SchedulerCommonTest`` (the mangled base was undefined).
    """

    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        # Default scheduler config; individual tests override via kwargs.
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # Reference variance values for the default linear beta schedule.
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_long(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 425 |
"""Lazy import structure for the Swin Transformer V2 (swinv2) model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Submodule name -> public names it exports; consumed by _LazyModule.
# (Restored: the original assigned the dict and the model list to the same
# mangled name and then referenced an undefined `_import_structure`, and the
# TYPE_CHECKING imports targeted a nonexistent `.configuration_swinva`.)
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration objects.
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for type checkers; runtime uses the lazy module below.
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 425 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


# Submodule name -> public names it exports; consumed by _LazyModule.
# (Restored: the original reassigned one mangled name for every section,
# clobbering the dict, then passed an undefined `_import_structure` to
# _LazyModule without registering it in sys.modules.)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for type checkers; runtime uses the lazy module below.
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 383 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
# Fixture passage used by every test below. (Renamed from the mangled `__A`,
# which the test class definition shadowed, making the text unreachable.)
TEXT = '''
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
'''


class __A ( unittest.TestCase , ToolTesterMixin ):
    """Tests for the `text-question-answering` tool, local and remote.

    Restored: the original stored the tools in locals (so `self.tool` was
    never set), inherited from an undefined mangled mixin name, and gave all
    four test methods the same name so only one would be collected.
    """

    def setUp(self):
        # Build both tool variants once per test.
        self.tool = load_tool('text-question-answering')
        self.tool.setup()
        self.remote_tool = load_tool('text-question-answering', remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')
| 343 | 0 |
def UpperCamelCase ( _a , _a , _a ) -> int:
'''simple docstring'''
def count_of_possible_combinations(_a ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(_a )
def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations summing to `target`, memoised top-down.

    `dp_array[t]` caches the count for sub-target `t` (-1 == not computed yet).
    The original had duplicate parameter names (SyntaxError) and dropped the
    memo-write target.
    """

    def count_of_possible_combinations_with_dp_array(
        target: int , dp_array: list[int] ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Iterative DP count of ordered combinations; `n` must equal len(array).

    dp_array[i] = number of ordered ways to reach sum i; dp_array[0] = 1 is the
    empty combination.  Fixes the duplicate-parameter SyntaxError and the
    mangled inner-loop bound (must iterate over the `n` array entries).
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1 , target + 1 ):
        for j in range(n ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Restored bindings: the mangled original assigned these three values to
    # unrelated junk names and then called combination_sum_iv with undefined
    # `n`, `array`, `target`.
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
| 441 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
class UpperCamelCase ( PreTrainedModel ):
    '''CLIP-based safety checker: flags NSFW / watermarked images and blacks them out.'''

    # Restored attribute names: the two mangled `lowercase` class attributes
    # overwrote each other, losing the config class entirely.
    config_class = CLIPConfig
    _no_split_modules = ["""CLIPEncoderLayer"""]

    def __init__( self , config ):
        super().__init__(config )

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config )
        self.p_head = nn.Linear(config.vision_config.projection_dim , 1 )
        self.w_head = nn.Linear(config.vision_config.projection_dim , 1 )

    @torch.no_grad()
    def forward( self , clip_input , images , p_threshold=0.5 , w_threshold=0.5 ):
        """Return (images, nsfw_flags, watermark_flags); offending images zeroed.

        The original def repeated one parameter name four times (SyntaxError);
        the method is named `forward` so nn.Module's __call__ dispatches to it.
        NOTE(review): `images` is presumed a mutable sequence of numpy arrays —
        confirm against the calling pipeline.
        """
        image_embeds = self.vision_model(clip_input )[0]

        nsfw_detected = self.p_head(image_embeds )
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected ):
            logger.warning(
                '''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
                ''' Try again with a different prompt and/or seed.''' )

        for idx, nsfw_detected_ in enumerate(nsfw_detected ):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape )

        watermark_detected = self.w_head(image_embeds )
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected ):
            logger.warning(
                '''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
                ''' Try again with a different prompt and/or seed.''' )

        for idx, watermark_detected_ in enumerate(watermark_detected ):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape )

        return images, nsfw_detected, watermark_detected
| 441 | 1 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
SCREAMING_SNAKE_CASE: str = namedtuple('''covid_data''', '''cases deaths recovered''')
def covid_stats( url: str = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
    """Scrape worldometers and return a (cases, deaths, recovered) namedtuple.

    Renamed from the mangled `_a`: the `print` at the bottom of the module
    calls `covid_stats()`.  The original body passed an undefined name for
    both the request URL and the XPath expression.
    """
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url ).content ).xpath(xpath_str ) )
# Restored: the template must be bound to `fmt`, the name used by the print
# below (the mangled original assigned it to an unrelated junk name).
fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
'''simple docstring'''
import os
def solution(filename: str = "matrix.txt" ) -> int:
    """Project Euler 81: minimal top-left -> bottom-right path sum (right/down only).

    Renamed from the mangled `__UpperCAmelCase`: the `__main__` guard calls
    `solution()`.  The matrix file is resolved relative to this module, and the
    grid is assumed square (the official input is 80x80) — the mangled original
    reused one junk name for the filename, `__file__` and every loop bound.
    """
    with open(os.path.join(os.path.dirname(__file__ ), filename ) ) as in_file:
        data = in_file.read()

    grid = [[int(cell ) for cell in row.split(',' )] for row in data.strip().splitlines()]
    n = len(grid[0] )
    dp = [[0 for i in range(n )] for j in range(n )]
    dp[0][0] = grid[0][0]

    # First row/column can only be reached by moving right/down respectively.
    for i in range(1, n ):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n ):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    for i in range(1, n ):
        for j in range(1, n ):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1] )
    return dp[-1][-1]
if __name__ == "__main__":
    # Prints the Project Euler 81 answer for the bundled matrix.txt input.
    # (A stray "| 448 | 0 |" corruption artifact trailing this line was removed.)
    print(f"""{solution() = }""")
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings( monkeypatch ):
    """Reset the deprecation-warning registry so every test sees fresh warnings.

    The parameter must be named `monkeypatch` for pytest to inject the builtin
    fixture; the mangled original named it something else and then referenced
    the undefined `monkeypatch` in the body.
    """
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def mock_hfh( monkeypatch ):
    """Patch `datasets.inspect.huggingface_hub` with a stub that lists fake metrics.

    The two mangled inner classes shared one name (the second clobbered the
    first) and the mock-construction referenced undefined names.
    """

    class MetricMock:
        def __init__( self , metric_id ):
            # NOTE(review): attribute name presumed `id` — the mangled source
            # dropped the left-hand side of this assignment; confirm against
            # what `datasets.inspect` reads off the listed metric objects.
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics( self ):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
@pytest.mark.parametrize(
    "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def test_metric_deprecation_warning( func , args , mock_emitted_deprecation_warnings , mock_hfh , tmp_path ):
    """Each deprecated metric entry point must emit the evaluate-migration warning.

    The original def repeated one parameter name five times (SyntaxError); the
    restored names match the two fixtures above plus pytest's `tmp_path`.
    """
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
    # NOTE(review): warning class presumed FutureWarning — the mangled original
    # passed an undefined name to pytest.warns; confirm against datasets'
    # deprecation utilities.
    with pytest.warns(FutureWarning , match="https://huggingface.co/docs/evaluate" ):
        func(*args )
| 614 |
"""simple docstring"""
from __future__ import annotations
# Adjacency list of the example undirected graph.  Must be bound to the name
# `graph` — both the Graph construction in __main__ and the BFS class below
# reference it; the mangled original assigned it to a junk name.
graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}
class Graph:
    """Unweighted shortest paths via breadth-first search.

    Renamed from the mangled class name: the module's `__main__` constructs
    `Graph(graph, "G")`.  The two traversal methods shared one mangled name
    (so only the second survived); their original names are restored.
    """

    def __init__( self , graph: dict[str, list[str]] , source_vertex: str ):
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent = {}
        self.source_vertex = source_vertex

    def breath_first_search( self ):
        """Run BFS from `source_vertex`, filling `self.parent` for each reachable node."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0 )
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex )
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex )

    def shortest_path( self , target_vertex: str ):
        """Return the BFS path "src->...->target"; raise ValueError if unreachable."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex )
        if target_vertex_parent is None:
            msg = (
                F'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}'''
            )
            raise ValueError(msg )
        return self.shortest_path(target_vertex_parent ) + F'''->{target_vertex}'''
if __name__ == "__main__":
    # The mangled original bound the instance to a junk name and then used the
    # undefined `g` on the following lines.
    g = Graph(graph, """G""")
    g.breath_first_search()
    print(g.shortest_path("""D"""))
    print(g.shortest_path("""G"""))
    print(g.shortest_path("""Foo"""))  # unreachable vertex: raises ValueError
| 614 | 1 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling( data: dict ) -> tuple:
    """Split a scikit-learn Bunch/dict into a (features, targets) pair.

    Renamed from one of three functions all mangled to `lowercase`; `main`
    below calls `data_handling(iris)`.
    """
    return (data["data"], data["target"])
def xgboost( features: np.ndarray , target: np.ndarray ) -> XGBClassifier:
    """Fit an XGBoost classifier on the given features/targets and return it.

    The original def repeated one parameter name twice (SyntaxError); `main`
    below calls `xgboost(x_train, y_train)`.
    """
    classifier = XGBClassifier()
    classifier.fit(features , target )
    return classifier
def main() -> None:
    """Train an XGBoost classifier on the iris dataset and plot its confusion matrix.

    Renamed from the third mangled `lowercase`: the `__main__` guard calls
    `main()`.  Every local was restored from a single collapsed junk name.
    """
    iris = load_iris()
    features, targets = data_handling(iris )
    x_train, x_test, y_train, y_test = train_test_split(
        features , targets , test_size=0.25 )
    names = iris['target_names']

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train , y_train )

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier , x_test , y_test , display_labels=names , cmap='Blues' , normalize='true' , )
    plt.title('Normalized Confusion Matrix - IRIS Dataset' )
    plt.show()
if __name__ == "__main__":
    import doctest
    # Run the module doctests verbosely, then launch the demo (main() opens a
    # matplotlib window, so this only makes sense when run as a script).
    doctest.testmod(verbose=True)
    main()
| 205 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter name -> HF UniSpeechSat parameter name.  The three module
# constants below were all mangled to the same junk name, so `logger`,
# `MAPPING` and `TOP_LEVEL_KEYS` (used throughout the loaders) were undefined.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "encoder.layer_norm_for_extract": "layer_norm_for_extract",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "label_embs_concat": "label_embeddings_concat",
    "mask_emb": "masked_spec_embed",
    "spk_proj": "speaker_proj",
}
# Mapped keys that live at the top level of the HF model (no "unispeech_sat."
# prefix is prepended for these).
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "label_embeddings_concat",
    "speaker_proj",
    "layer_norm_for_extract",
]
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    """Copy `value` into the attribute of `hf_pointer` addressed by dotted `key`.

    `weight_type` selects which tensor of the resolved module receives the data
    (weight / weight_g / weight_v / bias, or the pointer itself when None).
    The original def repeated one parameter name five times (SyntaxError) and
    dropped every assignment target.
    """
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )

    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}''' )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def recursively_load_weights( fairseq_model , hf_model ):
    """Copy every weight of a fairseq UniSpeechSat checkpoint into the HF model.

    Conv feature-extractor weights are delegated to `load_conv_layer`; all
    other tensors are routed through MAPPING / TOP_LEVEL_KEYS and written with
    `set_recursively`.  Restores the duplicate-parameter SyntaxError and the
    collapsed local names of the mangled original.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        # encoder layer index sits just before the parameter name
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )

    logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    """Load a single fairseq conv-extractor tensor into the HF feature extractor.

    The fairseq name encodes "conv_layers.<layer_id>.<type_id>...."; type 0 is
    the convolution itself, type 2 the layer norm (only for layer 0 when group
    norm is used).  The original def repeated one parameter name five times
    (SyntaxError) and dropped every assignment target.
    """
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_unispeech_sat_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    """Convert a fairseq UniSpeechSat checkpoint to the HF format and save it.

    Renamed from the mangled `snake_case__`: the argparse `__main__` below
    calls `convert_unispeech_sat_checkpoint(...)`.  The original def repeated
    one parameter name five times (SyntaxError).
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path )
    else:
        config = UniSpeechSatConfig()

    # NOTE(review): the source deliberately blanks dict_path before building
    # the fairseq arg_overrides — preserved as-is.
    dict_path = ""

    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config )
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    model = model[0].eval()

    recursively_load_weights(model , hf_wavavec )

    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # The mangled original bound both the parser and the parsed args to junk
    # names and then used the undefined `parser` / `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 579 | 0 |
from collections import defaultdict
def check_anagrams( first_str: str , second_str: str ) -> bool:
    """Return True if the two strings are anagrams, ignoring case and spaces.

    Renamed from the mangled `__lowerCAmelCase`: the interactive `__main__`
    calls `check_anagrams(...)`.  The original def repeated one parameter name
    (SyntaxError) and passed an undefined name as the defaultdict factory.
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" " , "" )
    second_str = second_str.replace(" " , "" )

    # Strings of different lengths are not anagrams
    if len(first_str ) != len(second_str ):
        return False

    # Default values for count should be 0
    count = defaultdict(int )

    # For each character, increment on the first string and decrement on the
    # second; anagrams leave every counter at exactly zero.
    for i in range(len(first_str ) ):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Restored bindings: the mangled original assigned all three results to
    # one junk name and then printed the undefined input_a/input_b/status.
    input_a = input('''Enter the first string ''').strip()
    input_b = input('''Enter the second string ''').strip()

    status = check_anagrams(input_a, input_b)
    print(F"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
| 708 |
def solution(limit: int = 1000000 ) -> int:
    """Project Euler 72: sum of Euler's totient phi(n) for 2 <= n <= limit.

    Renamed from the mangled `__lowerCAmelCase` (the `__main__` calls
    `solution()`).  Fixes two mangled loop bounds: the sieve must strike out
    multiples with step `p` (the original used the limit as the step, leaving
    composites in the prime set) and the phi pass must start at `p`.
    """
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )

    phi = [float(n ) for n in range(limit + 1 )]

    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:] ) )
if __name__ == "__main__":
    # Prints the Project Euler 72 answer for the default limit of 1,000,000.
    print(F"""{solution() = }""")
| 189 | 0 |
"""simple docstring"""
def _UpperCAmelCase ( __lowerCamelCase : Any , __lowerCamelCase : Tuple ) -> Union[str, Any]:
_snake_case = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def binary_mod_multiply(a: int , b: int , modulus: int ) -> int:
    """Return (a * b) % modulus via shift-and-add without forming the full product.

    Partial sums are reduced modulo `modulus` at every step, so intermediate
    values stay small.  Fixes the duplicate-parameter SyntaxError and the
    duplicate function name of the mangled original.
    """
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus

        a += a
        b >>= 1

    return res
| 224 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Pretrained config archive map.  The mangled original bound both the logger
# and this dict to one name, so the second assignment clobbered the logger.
SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class lowerCAmelCase__ ( PretrainedConfig ):
    """Configuration for SwitchTransformers models.

    The three class attributes below were all mangled to one name (clobbering
    each other), and the original `__init__` repeated a single parameter name
    28 times — a SyntaxError.  Parameter names/defaults are reconstructed from
    the body's attribute writes; base class restored to the imported
    `PretrainedConfig`.
    """

    model_type = """switch_transformers"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}

    def __init__(
        self ,
        vocab_size=32128 ,
        d_model=768 ,
        d_kv=64 ,
        d_ff=2048 ,
        expert_capacity=64 ,
        num_layers=12 ,
        num_sparse_encoder_layers=3 ,
        num_decoder_layers=12 ,
        num_sparse_decoder_layers=3 ,
        num_heads=12 ,
        num_experts=8 ,
        router_bias=False ,
        router_jitter_noise=0.0_1 ,
        router_dtype="float32" ,
        router_ignore_padding_tokens=False ,
        relative_attention_num_buckets=32 ,
        relative_attention_max_distance=128 ,
        dropout_rate=0.1 ,
        layer_norm_epsilon=1e-6 ,
        router_z_loss_coef=0.0_0_1 ,
        router_aux_loss_coef=0.0_0_1 ,
        initializer_factor=1.0 ,
        feed_forward_proj="relu" ,
        is_encoder_decoder=True ,
        add_router_probs=False ,
        use_cache=True ,
        pad_token_id=0 ,
        eos_token_id=1 ,
        **kwargs ,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        # "gated-gelu" style projections encode the activation after a dash.
        act_info = self.feed_forward_proj.split('''-''' )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == '''gated'''

        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
                '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
                '''\'gated-gelu\' or \'relu\'''' )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = '''gelu_new'''

        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , )
| 224 | 1 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester ( unittest.TestCase ):
    """Builds tiny ViT configs/inputs for the Flax ViT tests below.

    Renamed from the mangled class name (both classes in this file shared it):
    `FlaxViTModelTest.setUp` instantiates `FlaxViTModelTester(self)`.  The
    original `__init__` repeated one parameter name 16 times (SyntaxError);
    names are reconstructed from the attribute writes in the body.
    """

    def __init__(
        self ,
        parent ,
        batch_size=13 ,
        image_size=30 ,
        patch_size=2 ,
        num_channels=3 ,
        is_training=True ,
        use_labels=True ,
        hidden_size=32 ,
        num_hidden_layers=5 ,
        num_attention_heads=4 ,
        intermediate_size=37 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        type_sequence_label_size=10 ,
        initializer_range=0.02 ,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        config = ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )

        return config, pixel_values

    def create_and_check_model( self , config , pixel_values ):
        model = FlaxViTModel(config=config )
        result = model(pixel_values )
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )

    def create_and_check_for_image_classification( self , config , pixel_values ):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
    """Standard Flax model-test suite for ViT.

    The mangled original reused the tester's class name, duplicated every test
    method name (so only the last survived unittest discovery would see none),
    and referenced undefined names for the mixin base and several locals —
    all restored from the imported symbols and the method bodies.
    """

    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp( self ):
        self.model_tester = FlaxViTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_jit_compilation( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def model_jitted(pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )

                with self.subTest('''JIT Enabled''' ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()

                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()

                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )

    @slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''google/vit-base-patch16-224''' )
            outputs = model(np.ones((1, 3, 224, 224) ) )
            self.assertIsNotNone(outputs )
| 712 |
'''simple docstring'''
def solution(limit: int = 1000000 ) -> int:
    """Project Euler 72: sum of Euler's totient phi(n) for 2 <= n <= limit.

    Renamed from the mangled `_lowerCAmelCase` (the `__main__` calls
    `solution()`).  Fixes the mangled sieve step (must be `p`, not the limit)
    and the phi pass that started at an undefined name instead of `p`.
    """
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )

    phi = [float(n ) for n in range(limit + 1 )]

    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:] ) )
if __name__ == "__main__":
    # Prints the Project Euler 72 answer for the default limit of 1,000,000.
    print(f'''{solution() = }''')
| 88 | 0 |
class Things:
    """A menu item with a name, a value and a weight.

    Renamed from the mangled class name: `build_menu` below instantiates
    `Things(...)`.  All five accessor methods were mangled to the single name
    `_a` (so only the last survived); their original names, used by `greedy`,
    are restored.
    """

    def __init__( self , name , value , weight ):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__( self ):
        return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''

    def get_value( self ):
        return self.value

    def get_name( self ):
        return self.name

    def get_weight( self ):
        return self.weight

    def value_weight( self ):
        # value density — usable as a greedy sort key
        return self.value / self.weight
def build_menu( name , value , weight ):
    """Zip parallel name/value/weight lists into a list of Things.

    The original def repeated one parameter name three times (SyntaxError);
    names are restored from the constructor call in the body.
    """
    menu = []
    for i in range(len(value ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu
def greedy( item , max_cost , key_func ):
    """Greedy knapsack: take items in descending `key_func` order while the
    accumulated weight stays within `max_cost`.

    Returns (chosen_items, total_value).  The original def repeated one
    parameter name three times (SyntaxError) and collapsed the accumulator
    locals into a single name.
    """
    items_copy = sorted(item , key=key_func , reverse=True )
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def test_greedy() -> None:
    """Placeholder for the greedy() exercise (intentionally empty).

    Renamed from a third duplicate of `snake_case`; the original annotation
    `List[str]` was wrong for a body that returns nothing (and `List` is not
    imported in this module).
    """
    pass
if __name__ == "__main__":
    import doctest

    # Run the module doctests.  (A stray "| 57 |" corruption artifact that
    # trailed this line in the source was removed.)
    doctest.testmod()
"""simple docstring"""
import math
def malus_law( initial_intensity: float , angle: float ) -> float:
    """Transmitted intensity through a polarizer: I = I0 * cos^2(angle).

    `angle` is in degrees (0-360).  Renamed from the mangled `snake_case` to
    match the doctest registration below; the original def repeated one
    parameter name (SyntaxError) while the body already used these names.

    Raises ValueError for a negative intensity or an out-of-range angle.
    """
    if initial_intensity < 0:
        raise ValueError("""The value of intensity cannot be negative""" )
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
if __name__ == "__main__":
    import doctest
    # Run the doctests under the report name "malus_law".
    doctest.testmod(name='malus_law')
| 222 | 0 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class a(Pipeline):
    """Visual-question-answering pipeline: given an image and a question about
    it, returns the ``top_k`` answer labels with sigmoid scores.

    Follows the standard ``Pipeline`` contract: ``_sanitize_parameters`` routes
    kwargs, ``preprocess`` builds tokenizer + image-processor inputs,
    ``_forward`` runs the model, ``postprocess`` ranks the logits.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Reject models that are not visual-question-answering heads.
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        # Split user kwargs into per-stage parameter dicts.
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        # Accept either (image, question) pairs or ready-made {"image": ..., "question": ...} dicts.
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        # Clamp top_k to the label-space size.
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f'''Unsupported framework: {self.framework}''')

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map of submodule -> public names, consumed lazily by _LazyModule below.
_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

# The modeling module is only importable when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 83 | 1 |
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase_ (SCREAMING_SNAKE_CASE_ ):
    """Text-guided inpainting pipeline: a CLIPSeg model segments the region
    that matches ``text`` and a Stable Diffusion inpainting pipeline repaints it.

    NOTE(review): this block appears machine-mangled — every ``__init__`` /
    ``__call__`` parameter shares the name ``lowerCAmelCase_`` (a
    duplicate-argument SyntaxError), the base class ``SCREAMING_SNAKE_CASE_`` is
    undefined (presumably ``DiffusionPipeline``), and the bodies pass an
    undefined name ``_a`` where real arguments belong.  The intended
    identifiers must be recovered from the upstream source before this can run;
    the comments below describe apparent intent only.
    """

    def __init__( self : Union[str, Any] , lowerCAmelCase_ : CLIPSegForImageSegmentation , lowerCAmelCase_ : CLIPSegProcessor , lowerCAmelCase_ : AutoencoderKL , lowerCAmelCase_ : CLIPTextModel , lowerCAmelCase_ : CLIPTokenizer , lowerCAmelCase_ : UNetaDConditionModel , lowerCAmelCase_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCAmelCase_ : StableDiffusionSafetyChecker , lowerCAmelCase_ : CLIPImageProcessor , ) -> int:
        super().__init__()

        # Legacy-config migration: schedulers must use `steps_offset == 1`.
        # NOTE(review): `scheduler` is not bound by the (mangled) signature above.
        if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
            UpperCAmelCase_ : int = (
                f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
                f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
                'to update the config accordingly as leaving `steps_offset` might led to incorrect results'
                ' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'
                ' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'
                ' file'
            )
            deprecate("steps_offset!=1" , "1.0.0" , _a , standard_warn=_a )
            # Rebuild the scheduler config with steps_offset forced to 1.
            UpperCAmelCase_ : int = dict(scheduler.config )
            UpperCAmelCase_ : Any = 1
            UpperCAmelCase_ : List[Any] = FrozenDict(_a )

        # Legacy-config migration: PNDM schedulers must skip PRK steps.
        if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
            UpperCAmelCase_ : str = (
                f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
                ' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'
                ' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'
                ' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'
                ' Hub, it would be very nice if you could open a Pull request for the'
                ' `scheduler/scheduler_config.json` file'
            )
            deprecate("skip_prk_steps not set" , "1.0.0" , _a , standard_warn=_a )
            UpperCAmelCase_ : Dict = dict(scheduler.config )
            UpperCAmelCase_ : Tuple = True
            UpperCAmelCase_ : Union[str, Any] = FrozenDict(_a )

        # Warn loudly when the NSFW safety checker is disabled.
        if safety_checker is None:
            logger.warning(
                f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )

        self.register_modules(
            segmentation_model=_a , segmentation_processor=_a , vae=_a , text_encoder=_a , tokenizer=_a , unet=_a , scheduler=_a , safety_checker=_a , feature_extractor=_a , )

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Optional[Union[str, int]] = "auto" ) -> List[str]:
        # Enable sliced attention computation (trades speed for lower memory use).
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            UpperCAmelCase_ : List[Any] = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(_a )

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
        # Disable attention slicing.
        # NOTE(review): presumably should forward None — verify against upstream.
        self.enable_attention_slicing(_a )

    def _SCREAMING_SNAKE_CASE ( self : str ) -> Any:
        # Offload sub-models to CPU via accelerate to reduce GPU memory pressure.
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )

        UpperCAmelCase_ : Dict = torch.device("cuda" )

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(_a , _a )

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
        # Device on which the pipeline actually executes; accounts for accelerate
        # hooks that keep weights elsewhere until they are needed.
        if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(_a , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    @torch.no_grad()
    def __call__( self : List[str] , lowerCAmelCase_ : Union[str, List[str]] , lowerCAmelCase_ : Union[torch.FloatTensor, PIL.Image.Image] , lowerCAmelCase_ : str , lowerCAmelCase_ : int = 512 , lowerCAmelCase_ : int = 512 , lowerCAmelCase_ : int = 50 , lowerCAmelCase_ : float = 7.5 , lowerCAmelCase_ : Optional[Union[str, List[str]]] = None , lowerCAmelCase_ : Optional[int] = 1 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : Optional[torch.Generator] = None , lowerCAmelCase_ : Optional[torch.FloatTensor] = None , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase_ : int = 1 , **lowerCAmelCase_ : Optional[Any] , ) -> int:
        # 1. Segment the region described by `text` with CLIPSeg.
        UpperCAmelCase_ : Union[str, Any] = self.segmentation_processor(
            text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
        UpperCAmelCase_ : Union[str, Any] = self.segmentation_model(**_a )
        # 2. Convert the logits into a PIL mask resized to the input image.
        UpperCAmelCase_ : List[str] = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        UpperCAmelCase_ : Optional[Any] = self.numpy_to_pil(_a )[0].resize(image.size )

        # Run inpainting pipeline with the generated mask
        UpperCAmelCase_ : Tuple = StableDiffusionInpaintPipeline(
            vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
        return inpainting_pipeline(
            prompt=_a , image=_a , mask_image=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , )
| 95 |
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1E-12):
    """Cosine similarity matrix between two batches of embeddings.

    Each row of both inputs is L2-normalized (norms clipped to ``eps`` to avoid
    division by zero) before the pairwise inner products are taken.
    """
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


# Backward-compatible alias for the (mangled) original name.
a_ = jax_cosine_distance
class lowerCamelCase_ ( nn.Module ):
    """Flax module that flags NSFW images: projects CLIP vision embeddings and
    compares them (cosine similarity) against fixed "concept" and
    "special care" embeddings."""

    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # Flax calls setup() lazily; submodules/params must be bound to `self`.
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param('concept_embeds', jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            'special_care_embeds', jax.nn.initializers.ones, (3, self.config.projection_dim))

        self.concept_embeds_weights = self.param('concept_embeds_weights', jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param('special_care_embeds_weights', jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)
        return has_nsfw_concepts


# Backward-compatible alias: the wrapper class below refers to this module by
# its upstream name.
FlaxStableDiffusionSafetyCheckerModule = lowerCamelCase_
class lowerCamelCase_ ( FlaxPreTrainedModel ):
    """FlaxPreTrainedModel wrapper around the safety-checker nn.Module: handles
    config/weight management and NHWC input conversion."""

    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(self, config: CLIPConfig, input_shape: Optional[Tuple] = None, seed: int = 0,
                 dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs):
        if input_shape is None:
            # Default CLIP vision input: one 224x224 RGB image (NHWC).
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {'params': params_rng, 'dropout': dropout_rng}

        random_params = self.module.init(rngs, clip_input)['params']
        return random_params

    def __call__(self, clip_input, params: dict = None):
        # Convert NCHW -> NHWC as expected by the Flax CLIP vision module.
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {'params': params or self.params}, jnp.array(clip_input, dtype=jnp.float32), rngs={}, )
| 459 | 0 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCAmelCase:
    """Builds small ViTMAE configs and inputs for the unit tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


# Backward-compatible alias: the test class below instantiates the tester by
# its upstream name.
ViTMAEModelTester = __UpperCAmelCase
@require_torch
class __UpperCAmelCase(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for ViTMAE.  Several inherited tests are overridden or
    skipped because every forward pass samples a fresh random mask."""

    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"""feature-extraction""": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''ViTMAE does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make the PT<->TF mask reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.''')
    def test_determinism(self):
        pass

    @unittest.skip(
        reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.''')
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.''')
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''')
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def a__() -> Optional[Any]:
    """Return the COCO cats fixture image used by the integration test below."""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image


# Backward-compatible alias: the integration test calls this helper as
# ``prepare_img``.
prepare_img = a__
@require_torch
@require_vision
class __UpperCAmelCase(unittest.TestCase):
    """Slow integration test running the real facebook/vit-mae-base checkpoint."""

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''') if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]])
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
import doctest
from collections import deque

import numpy as np
class __UpperCAmelCase:
    """Circular convolution of two signals via the circulant-matrix method."""

    def __init__(self):
        # Default example signals; callers may overwrite these attributes.
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def UpperCAmelCase(self):
        """Return the circular convolution of ``first_signal`` with ``second_signal``.

        Builds the circulant matrix of the second signal, multiplies it with the
        first signal, and rounds each output sample to two decimal places.
        """
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for _ in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # each row of the circulant matrix is the second signal rotated by i
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    doctest.testmod()
| 236 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.