| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
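Each row of the table pairs a `code` sample with a `style_context` sample, an integer style id for each, and a binary `label`; in the rows shown below, `label` is 1 exactly when the two style ids match. As a minimal sketch, assuming this dump comes from a Hugging Face `datasets` repository with the schema above, it could be loaded as follows; the repository id `user/code-style-pairs` is a hypothetical placeholder, not the dataset's real name.

```python
# Minimal sketch of loading a dataset with the schema above.
# The repository id "user/code-style-pairs" is a hypothetical placeholder.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # first characters of the code sample
```

The rows follow, with each cell prefixed by its column name.

code: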
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    """Tests for `FeaturesManager.determine_framework`."""

    def setUp(self) -> None:
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_path: str) -> None:
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_path)

    def _setup_tf_ckpt(self, save_path: str) -> None:
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_path)

    def test_framework_provided(self) -> None:
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self) -> None:
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self) -> None:
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
code_codestyle: 63

style_context:
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    """Build a small example tree: 1 at the root, 2 and 3 below it, 4 and 5 below 2."""
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    """Pre-order traversal: visit root, then left subtree, then right subtree."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Post-order traversal: visit left subtree, then right subtree, then root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """In-order traversal: visit left subtree, then root, then right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Height of the tree: number of nodes on the longest root-to-leaf path."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Any]:
    """Breadth-first traversal, level by level, using a queue."""
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Any]:
    """Collect the values of a single level, from left to right."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Any]:
    """Collect the values of a single level, from right to left."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Any]:
    """Zigzag traversal: alternate the direction of traversal on every level."""
    if root is None:
        return []
    output: list[Sequence[Any]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
style_context_codestyle: 63, label: 1

code:
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    """Capitalize the first letter of a sentence or word."""
    if not sentence:
        return ""
    # Map each lowercase letter to its uppercase counterpart.
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
code_codestyle: 706

style_context:
import unittest

from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")


@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_complete_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."]
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]

        # fmt: off
        expected_encoding = {"input_ids": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="AI-Sweden/gpt-sw3-126m",
            sequences=sequences,
        )
style_context_codestyle: 305, label: 0

code:
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
code_codestyle: 14

style_context:
# Functions to print the upper and lower halves of a diamond (pyramid) pattern.


def floyd(n: int) -> None:
    """Print the upper half of the diamond."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n: int) -> None:
    """Print the lower half of the diamond."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n: int) -> None:
    """Print the whole diamond: the upper half followed by the lower half."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
style_context_codestyle: 119, label: 0

code:
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> float:
lowerCAmelCase__ : Tuple = x
lowerCAmelCase__ : Union[str, Any] = y
for step in range(_lowercase ): # noqa: B007
lowerCAmelCase__ : Any = a * a - b * b + x
lowerCAmelCase__ : str = 2 * a * b + y
lowerCAmelCase__ : Dict = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def lowercase_ ( __UpperCAmelCase ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def lowercase_ ( __UpperCAmelCase ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(_lowercase , 1 , 1 ) )
def lowercase_ ( __UpperCAmelCase = 800 , __UpperCAmelCase = 600 , __UpperCAmelCase = -0.6 , __UpperCAmelCase = 0 , __UpperCAmelCase = 3.2 , __UpperCAmelCase = 50 , __UpperCAmelCase = True , ) -> Image.Image:
lowerCAmelCase__ : List[str] = Image.new("""RGB""" , (image_width, image_height) )
lowerCAmelCase__ : Union[str, Any] = img.load()
# loop through the image-coordinates
for image_x in range(_lowercase ):
for image_y in range(_lowercase ):
# determine the figure-coordinates based on the image-coordinates
lowerCAmelCase__ : List[Any] = figure_width / image_width * image_height
lowerCAmelCase__ : Any = figure_center_x + (image_x / image_width - 0.5) * figure_width
lowerCAmelCase__ : List[Any] = figure_center_y + (image_y / image_height - 0.5) * figure_height
lowerCAmelCase__ : Union[str, Any] = get_distance(_lowercase , _lowercase , _lowercase )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
lowerCAmelCase__ : List[Any] = get_color_coded_rgb(_lowercase )
else:
lowerCAmelCase__ : List[Any] = get_black_and_white_rgb(_lowercase )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
_A = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
code_codestyle: 709

style_context:
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m: nn.Module, inputs: Tensor, outputs: Tensor):
        # record leaf modules (no submodules) as well as convolutions and batch norms
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [handle.remove() for handle in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass
        using `x` as input, tracking all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")


def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
style_context_codestyle: 507, label: 0

code:
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature-extractor-specific tests
    feat_extract_tester = None
    feature_extraction_class = None

    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()

    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))

    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size

        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features, padding=False)
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="longest")
        input_2 = input_2[input_name]

        input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")
        input_4 = input_4[input_name]

        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length")[input_name]

        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np"
        )
        input_5 = input_5[input_name]

        self.assertFalse(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(_inputs_are_equal(input_2, input_3))
        self.assertTrue(len(input_1[0]) == pad_min_length)
        self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0])))
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))

        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)

        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)
        input_7 = input_7[input_name]

        input_8 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            pad_to_multiple_of=10,
            max_length=pad_max_length,
            return_tensors="np",
        )
        input_9 = input_9[input_name]

        self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
        self.assertTrue(_inputs_are_equal(input_6, input_7))

        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))

        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size)

        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_5[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_5[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_5[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3
        )
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3
        )

    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
        )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
        input_2 = input_2[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to smallest with np
        input_3 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            return_tensors="np",
            truncation=True,
        )
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
        )
        input_4 = input_4[input_name]

        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(input_3.shape[1] == len(speech_inputs[0]))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_4))

        # truncate to middle
        input_5 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[1]),
            truncation=True,
            return_tensors="np",
        )
        input_5 = input_5[input_name]

        input_6 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
        )
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
        )
        input_7 = input_7[input_name]

        self.assertTrue(input_5.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_have_equal_length(input_6))
        self.assertTrue(_inputs_are_equal(input_5, input_6))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_7))
        self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_8 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
            truncation=True,
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
        )
        input_9 = input_9[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_8[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_8))
        self.assertFalse(_inputs_have_equal_length(input_9))

    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)

    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
code_codestyle: 80

style_context:
from __future__ import annotations

import math

__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"


def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """
    Convert a 3D point (x, y, z) to its 2D perspective projection, using the
    given scale factor and observer distance.
    """
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """
    Rotate a 3D point around the given axis ('x', 'y' or 'z') by the given angle.
    """
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
style_context_codestyle: 186, label: 0

code:
"""Convert BLIP checkpoints from the original repository to the Hugging Face format."""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image


def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key


@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)

    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
code_codestyle: 427

style_context:
"""Processor class for Donut."""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( UpperCAmelCase__ ):
lowercase_ : int = ['''image_processor''', '''tokenizer''']
lowercase_ : Dict = '''AutoImageProcessor'''
lowercase_ : Dict = '''AutoTokenizer'''
def __init__( self : int , __lowerCAmelCase : Any=None , __lowerCAmelCase : Tuple=None , **__lowerCAmelCase : List[Any] ):
__snake_case = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __lowerCAmelCase , )
        feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call.)"
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a (generated) token sequence into an ordered JSON format."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
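# --- Usage sketch (added illustration, not part of the original file). In the
# upstream transformers source this class is `DonutProcessor`; that name is an
# assumption here. `token2json` only touches `self.tokenizer` when
# `added_vocab` is None, so it can be exercised with an explicit vocabulary:
if __name__ == "__main__":
    demo = DonutProcessor.__new__(DonutProcessor)  # skip __init__ for the demo
    sequence = "<s_menu><s_name>Latte</s_name><s_price>4.50</s_price></s_menu>"
    print(demo.token2json(sequence, added_vocab=[]))
    # -> {'menu': {'name': 'Latte', 'price': '4.50'}}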
| 427
| 1
|
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return the multiplication table of `number` up to `number_of_terms` terms."""
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
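# Expected output of the demo above (number=5, number_of_terms=10):
#   5 * 1 = 5
#   5 * 2 = 10
#   ...
#   5 * 10 = 50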
| 61
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
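# Usage sketch (added illustration, not part of the original module): inspect
# the symbolic ONNX input axes exposed by the config above. Because of the
# relative imports, this only runs inside the transformers package.
if __name__ == "__main__":
    onnx_config = AlbertOnnxConfig(AlbertConfig(), task="multiple-choice")
    print(onnx_config.inputs)
    # OrderedDict([('input_ids', {0: 'batch', 1: 'choice', 2: 'sequence'}), ...])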
| 326
| 0
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """
    Output class for the ScoreSdeVeScheduler's step function output.

    Args:
        prev_sample (`torch.FloatTensor`): Computed sample (x_{t-1}) of the previous timestep.
        prev_sample_mean (`torch.FloatTensor`): Mean averaged `prev_sample` over previous timesteps.
    """

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """The variance exploding stochastic differential equation (SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
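# Usage sketch (added illustration, not part of the scheduler file): the
# predictor-corrector loop this scheduler is designed for. The score network is
# replaced by a hypothetical stand-in; a real pipeline would use a trained
# model (cf. diffusers' ScoreSdeVePipeline). The relative imports above mean
# this only runs inside the diffusers package.
if __name__ == "__main__":
    scheduler = ScoreSdeVeScheduler()
    scheduler.set_timesteps(num_inference_steps=10)
    scheduler.set_sigmas(num_inference_steps=10)

    def score_model(x, t):  # hypothetical stand-in for a score network
        return -x

    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        # corrector (Langevin) steps, then one predictor step
        for _ in range(scheduler.config.correct_steps):
            sample = scheduler.step_correct(score_model(sample, t), sample).prev_sample
        sample = scheduler.step_pred(score_model(sample, t), t, sample).prev_sample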
| 383
|
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of `nums`."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
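# Worked example (added illustration): for nums = [2, 7, 9, 3, 1] the pair
# (max_including, max_excluding) evolves (2, 0) -> (7, 2) -> (11, 7) ->
# (10, 11) -> (12, 11), so the function returns max(12, 11) = 12 (take 2+9+1).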
| 383
| 1
|
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """
    Return the relative distance (= step/max_step) after which the complex
    number constituted by this x-y-pair diverges. Members of the Mandelbrot
    set do not diverge so their distance is 1.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black & white color-coding: the Mandelbrot set is black, everything else is white."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Color-coding that takes the relative distance into account; the Mandelbrot set is black."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """Create an image of the Mandelbrot set."""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
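# Quick sanity check (added illustration): points inside the Mandelbrot set
# never diverge, so get_distance(0, 0, 50) == 1.0, while the exterior point
# (1, 1) diverges on the first iteration, so get_distance(1, 1, 50) == 0.0.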
| 257
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_FILE = get_tests_dir("fixtures")


class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )


@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_FILE)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_FILE)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_FILE)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
| 1
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)

    To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation:
    input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
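# Shape sketch (added illustration): for each dataset, pre_process_datasets
# returns tensors shaped (n_batch, 2, input_len) for input_ids and lm_labels,
# (n_batch, 2) for mc_token_ids and (n_batch,) for mc_labels -- one row per
# story, with the two candidate continuations stacked along the second axis.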
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))


if __name__ == "__main__":
    main()
| 391
|
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """
    Formula for the amortization amount per month:
    A = p * r * (1 + r)^n / ((1 + r)^n - 1)
    where p is the principal, r is the rate of interest per month
    and n is the number of payments
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
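# Worked example (added illustration): borrowing a principal of 25000 at 12%
# per annum (0.12) over 2 years gives rate_per_month = 0.12 / 12 = 0.01 and
# number_of_payments = 24, so the monthly installment is
# 25000 * 0.01 * 1.01**24 / (1.01**24 - 1) which is approximately 1176.8.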
| 391
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
| 0
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 619
| 0
|
def solution(pence: int = 200) -> int:
    """Count the ways `pence` can be made from standard British coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
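# Worked example (added illustration): solution(5) = 4, counting {5},
# {2, 2, 1}, {2, 1, 1, 1} and {1, 1, 1, 1, 1} via the classic change-making
# recurrence number_of_ways[i] += number_of_ways[i - coin].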
| 710
|
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """
    Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint.

    Returns: ConvertCommand
    """
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""


class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, lxmert, rembert, t5,"
                " transfo_xl, xlm, xlnet]"
            )
| 504
| 0
|
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowerCamelCase ="platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range, use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2,
            decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48,
            eos_token_id=2, pad_token_id=1, bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2,
            decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
| 285
|
def jaro_winkler(str1: str, str2: str) -> float:
    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot match twice
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)
    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
| 285
| 1
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__lowerCamelCase = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module , tensor_name , device , value=None , fp16_statistics=None ):
    """Set a parameter or buffer `tensor_name` of `module` on `device`, quantizing with bitsandbytes when applicable."""
if "." in tensor_name:
_a : List[Any] = tensor_name.split('''.''' )
for split in splits[:-1]:
_a : int = getattr(UpperCAmelCase , UpperCAmelCase )
if new_module is None:
raise ValueError(F'{module} has no attribute {split}.' )
_a : Any = new_module
_a : List[Any] = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F'{module} does not have a parameter or a buffer named {tensor_name}.' )
_a : List[str] = tensor_name in module._buffers
_a : Any = getattr(UpperCAmelCase , UpperCAmelCase )
    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put it on {device}.")
    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_4bit = False
        is_8bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)
    if is_4bit or is_8bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2")
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.")
            else:
                new_value = torch.tensor(value, device="cpu")
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T
            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)
            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)
        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False):
    """Recursively replace `nn.Linear`/`Conv1D` children with bitsandbytes 8-bit or 4-bit linear layers."""
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features
                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features, out_features, module.bias is not None, has_fp16_weights=quantization_config.llm_int8_has_fp16_weight, threshold=quantization_config.llm_int8_threshold, )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features, out_features, module.bias is not None, quantization_config.bnb_4bit_compute_dtype, compress_statistics=quantization_config.bnb_4bit_use_double_quant, quant_type=quantization_config.bnb_4bit_quant_type, )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module, modules_to_not_convert, current_key_name, quantization_config, has_been_replaced=has_been_replaced, )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """Public wrapper around `_replace_with_bnb_linear` that keeps `lm_head` in full precision by default."""
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config)
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def replace_8bit_linear(*args, **kwargs):
    """Deprecated alias kept for backward compatibility."""
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead", FutureWarning, )
    return replace_with_bnb_linear(*args, **kwargs)
def set_module_8bit_tensor_to_device(*args, **kwargs):
    """Deprecated alias kept for backward compatibility."""
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead", FutureWarning, )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    """Return module names (e.g. tied or output embeddings) that should stay in full precision."""
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0
    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)
    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)
    return filtered_module_names
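# Standalone sketch (assumes torch and transformers are installed) of why the
# transpose in `set_module_quantized_tensor_to_device` is needed: GPT-2 style
# `Conv1D` stores its weight as (in_features, out_features), the transpose of
# `nn.Linear`, so the matrix must be flipped before quantization.
def _conv1d_transpose_demo():
    import torch.nn as torch_nn  # local imports keep the sketch self-contained
    from transformers.pytorch_utils import Conv1D as _Conv1D

    linear = torch_nn.Linear(4, 8)
    conv1d = _Conv1D(8, 4)  # Conv1D(nf=out_features, nx=in_features)
    assert tuple(linear.weight.shape) == (8, 4)
    assert tuple(conv1d.weight.shape) == (4, 8)  # transposed relative to nn.Linear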
| 307
|
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
__lowerCamelCase = TypeVar('T')
__lowerCamelCase = TypeVar('U')
class UpperCamelCase_ ( Generic[T, U] ):
    def __init__(self, key, val) -> None:
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None
def __repr__( self ) -> str:
return (
F'Node: key: {self.key}, val: {self.val}, '
F'has next: {bool(self.next )}, has prev: {bool(self.prev )}'
)
class UpperCamelCase_ ( Generic[T, U] ):
def __init__( self ) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head
def __repr__( self ) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n ".join(rep)
    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear
    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class UpperCamelCase_ ( Generic[T, U] ):
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}
    def __init__(self, capacity: int) -> None:
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self ) -> str:
return (
F'CacheInfo(hits={self.hits}, misses={self.miss}, '
F'capacity={self.capacity}, current size={self.num_keys})'
)
    def __contains__(self, key: T) -> bool:
return key in self.cache
    def get(self, key: T) -> U | None:
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
self.miss += 1
return None
    def put(self, key: T, value: U) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert self.list.remove(first_node) is not None  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)
@classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result
            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]
            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper
        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
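if __name__ == "__main__":
    # Usage sketch for the class-level decorator above (the workload is
    # illustrative): repeated calls with the same argument become cache hits.
    @LRUCache.decorator(100)
    def fib(num: int) -> int:
        if num in (1, 2):
            return 1
        return fib(num - 1) + fib(num - 2)

    print(fib(30))  # 832040; the recursive calls are served from the cache
    print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, ...)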
| 307
| 1
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
def A ( __UpperCamelCase , __UpperCamelCase ) -> Dict:
if metric == "rouge2":
A__ = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
A__ = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
A__ = '{val_avg_em:.4f}-{step_count}'
elif metric == "loss":
A__ = '{val_avg_loss:.4f}-{step_count}'
else:
raise NotImplementedError(
f'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
' function.' )
A__ = ModelCheckpoint(
dirpath=__UpperCamelCase , filename=__UpperCamelCase , monitor=f'''val_{metric}''' , mode='max' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def A ( __UpperCamelCase , __UpperCamelCase ) -> Any:
return EarlyStopping(
monitor=f'''val_{metric}''' , mode='min' if 'loss' in metric else 'max' , patience=__UpperCamelCase , verbose=__UpperCamelCase , )
class __lowerCAmelCase ( pl.Callback ):
"""simple docstring"""
def _a ( self : Dict , _snake_case : Union[str, Any] , _snake_case : str ):
"""simple docstring"""
A__ = {F'''lr_group_{i}''': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_snake_case )
@rank_zero_only
def _a ( self : Union[str, Any] , _snake_case : pl.Trainer , _snake_case : pl.LightningModule , _snake_case : str , _snake_case : Optional[Any]=True ):
"""simple docstring"""
logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
A__ = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
A__ = Path(pl_module.hparams.output_dir )
if type_path == "test":
A__ = od / 'test_results.txt'
A__ = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
A__ = od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
A__ = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=_snake_case )
generations_file.parent.mkdir(exist_ok=_snake_case )
with open(_snake_case , 'a+' ) as writer:
for key in sorted(_snake_case ):
if key in ["log", "progress_bar", "preds"]:
continue
A__ = metrics[key]
if isinstance(_snake_case , torch.Tensor ):
A__ = val.item()
A__ = F'''{key}: {val:.6f}\n'''
writer.write(_snake_case )
if not save_generations:
return
if "preds" in metrics:
A__ = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(_snake_case )
@rank_zero_only
def _a ( self : Dict , _snake_case : List[str] , _snake_case : List[Any] ):
"""simple docstring"""
try:
A__ = pl_module.model.model.num_parameters()
except AttributeError:
A__ = pl_module.model.num_parameters()
A__ = count_trainable_parameters(_snake_case )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )
@rank_zero_only
def _a ( self : int , _snake_case : pl.Trainer , _snake_case : pl.LightningModule ):
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_snake_case , _snake_case , 'test' )
@rank_zero_only
def _a ( self : Optional[Any] , _snake_case : pl.Trainer , _snake_case : List[Any] ):
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 9
|
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)
        from datasets import load_dataset
        dataset = load_dataset("nielsr/rvlcdip-demo")
        image = dataset["train"][0]["image"].convert("RGB")
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347], device=torch_device, dtype=torch.float, )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 165
| 0
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue_model_parallelism.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
] )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def a (self : Dict ):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=a__ , )
assert hasattr(self , '''env''' )
def a (self : Optional[int] , a__ : List[Any] ):
"""simple docstring"""
__snake_case = {
'''enabled''': True,
'''processes_per_host''': 8,
}
__snake_case = {
'''enabled''': True,
'''parameters''': {
'''microbatches''': 4,
'''placement_strategy''': '''spread''',
'''pipeline''': '''interleaved''',
'''optimize''': '''speed''',
'''partitions''': 4,
'''ddp''': True,
},
}
__snake_case = {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
__snake_case = '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=a__ , instance_type=self.instance_type , debugger_hook_config=a__ , hyperparameters={
**self.env.hyperparameters,
'''model_name_or_path''': self.model_name_or_path,
'''max_steps''': 500,
} , metric_definitions=self.env.metric_definitions , distribution=a__ , py_version='''py36''' , )
def a (self : Any , a__ : Dict ):
"""simple docstring"""
TrainingJobAnalytics(a__ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def a (self : str , a__ : int ):
"""simple docstring"""
__snake_case = self.create_estimator(a__ )
# run training
estimator.fit()
# result dataframe
__snake_case = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__snake_case = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
__snake_case = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__snake_case = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , a__ )
| 709
|
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
snake_case_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
def __init__(self : Tuple , *a__ : Optional[Any] , **a__ : Any ):
"""simple docstring"""
warnings.warn(
'''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DeiTImageProcessor instead.''' , a__ , )
super().__init__(*a__ , **a__ )
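# Migration sketch (the checkpoint name is illustrative): construct the image
# processor directly instead of instantiating the deprecated feature extractor.
#
#     from transformers import DeiTImageProcessor
#     image_processor = DeiTImageProcessor.from_pretrained(
#         "facebook/deit-base-distilled-patch16-224"
#     )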
| 388
| 0
|
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class lowercase ( UpperCamelCase__ ):
_a = 42
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
@register_to_config
def __init__( self , _a = 16 , _a = 88 , _a = None , _a = None , _a = 1 , _a = 0.0 , _a = 32 , _a = None , _a = False , _a = None , _a = "geglu" , _a = True , _a = True , ) -> List[Any]:
super().__init__()
_A : int = num_attention_heads
_A : List[Any] = attention_head_dim
_A : List[Any] = num_attention_heads * attention_head_dim
_A : Union[str, Any] = in_channels
_A : Union[str, Any] = torch.nn.GroupNorm(num_groups=_a , num_channels=_a , eps=1e-6 , affine=_a )
_A : Tuple = nn.Linear(_a , _a )
# 3. Define transformers blocks
_A : int = nn.ModuleList(
[
BasicTransformerBlock(
_a , _a , _a , dropout=_a , cross_attention_dim=_a , activation_fn=_a , attention_bias=_a , double_self_attention=_a , norm_elementwise_affine=_a , )
for d in range(_a )
] )
_A : Union[str, Any] = nn.Linear(_a , _a )
def a__ ( self , _a , _a=None , _a=None , _a=None , _a=1 , _a=None , _a = True , ) -> List[Any]:
_A , _A , _A , _A : int = hidden_states.shape
_A : List[str] = batch_frames // num_frames
_A : Any = hidden_states
_A : Optional[int] = hidden_states[None, :].reshape(_a , _a , _a , _a , _a )
_A : Tuple = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
_A : List[Any] = self.norm(_a )
_A : List[str] = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , _a , _a )
_A : str = self.proj_in(_a )
# 2. Blocks
for block in self.transformer_blocks:
_A : str = block(
_a , encoder_hidden_states=_a , timestep=_a , cross_attention_kwargs=_a , class_labels=_a , )
# 3. Output
_A : int = self.proj_out(_a )
_A : List[Any] = (
hidden_states[None, None, :]
.reshape(_a , _a , _a , _a , _a )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
_A : Tuple = hidden_states.reshape(_a , _a , _a , _a )
_A : Tuple = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=_a )
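# Shape walk-through of the forward pass above (dimension names added for
# clarity; this is commentary, not extra API): the block treats time as the
# sequence axis for each spatial location.
#
#     hidden_states: (batch * num_frames, channels, height, width)
#       -> reshape to (batch, channels, num_frames, height, width) and norm
#       -> flatten to (batch * height * width, num_frames, inner_dim) via proj_in
#       -> transformer blocks attend across the num_frames axis
#       -> proj_out, reshape back to (batch * num_frames, channels, height, width),
#          and add the residual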
| 307
|
def decimal_isolate(number: float, digit_amount: int) -> float:
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
| 307
| 1
|
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __A ( a , a , unittest.TestCase ):
__A = IFInpaintingSuperResolutionPipeline
__A = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
__A = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""} )
__A = PipelineTesterMixin.required_optional_params - {"""latents"""}
def _snake_case ( self ):
return self._get_superresolution_dummy_components()
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=0 ):
if str(UpperCAmelCase_ ).startswith("""mps""" ):
lowerCamelCase =torch.manual_seed(UpperCAmelCase_ )
else:
lowerCamelCase =torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
lowerCamelCase =floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCamelCase =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCamelCase =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCamelCase ={
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _snake_case ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _snake_case ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def _snake_case ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _snake_case ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _snake_case ( self ):
self._test_save_load_local()
def _snake_case ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 710
|
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class __A ( a ):
def _snake_case ( self ):
lowerCamelCase =SMALL_MODEL_IDENTIFIER
lowerCamelCase ="""pt"""
lowerCamelCase ="""tf"""
def _snake_case ( self , UpperCAmelCase_ ):
lowerCamelCase =AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(UpperCAmelCase_ )
def _snake_case ( self , UpperCAmelCase_ ):
lowerCamelCase =TFAutoModel.from_pretrained(self.test_model , from_pt=UpperCAmelCase_ )
model_tf.save_pretrained(UpperCAmelCase_ )
def _snake_case ( self ):
lowerCamelCase ="""mock_framework"""
# Framework provided - return whatever the user provides
lowerCamelCase =FeaturesManager.determine_framework(self.test_model , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(UpperCAmelCase_ )
lowerCamelCase =FeaturesManager.determine_framework(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(UpperCAmelCase_ )
lowerCamelCase =FeaturesManager.determine_framework(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def _snake_case ( self ):
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(UpperCAmelCase_ )
lowerCamelCase =FeaturesManager.determine_framework(UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(UpperCAmelCase_ )
lowerCamelCase =FeaturesManager.determine_framework(UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(UpperCAmelCase_ ):
lowerCamelCase =FeaturesManager.determine_framework(UpperCAmelCase_ )
def _snake_case ( self ):
lowerCamelCase =MagicMock(return_value=UpperCAmelCase_ )
with patch("""transformers.onnx.features.is_tf_available""" , UpperCAmelCase_ ):
lowerCamelCase =FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCAmelCase_ , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
lowerCamelCase =MagicMock(return_value=UpperCAmelCase_ )
with patch("""transformers.onnx.features.is_torch_available""" , UpperCAmelCase_ ):
lowerCamelCase =FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCAmelCase_ , self.framework_tf )
# Both in environment -> use PyTorch
lowerCamelCase =MagicMock(return_value=UpperCAmelCase_ )
lowerCamelCase =MagicMock(return_value=UpperCAmelCase_ )
with patch("""transformers.onnx.features.is_tf_available""" , UpperCAmelCase_ ), patch(
"""transformers.onnx.features.is_torch_available""" , UpperCAmelCase_ ):
lowerCamelCase =FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCAmelCase_ , self.framework_pt )
# Both not in environment -> raise error
lowerCamelCase =MagicMock(return_value=UpperCAmelCase_ )
lowerCamelCase =MagicMock(return_value=UpperCAmelCase_ )
with patch("""transformers.onnx.features.is_tf_available""" , UpperCAmelCase_ ), patch(
"""transformers.onnx.features.is_torch_available""" , UpperCAmelCase_ ):
with self.assertRaises(UpperCAmelCase_ ):
lowerCamelCase =FeaturesManager.determine_framework(self.test_model )
| 269
| 0
|
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
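# Example (the hint text is illustrative): enforce the pinned version of a
# single dependency on demand, e.g. from a framework-specific code path.
#
#     dep_version_check("tqdm", "tqdm is required for progress bars")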
| 21
|
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class __magic_name__ :
@staticmethod
def _A ( *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Dict ):
pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]
def mask_to_test_readable(image: Image) -> Dict:
    npimg = np.array(image)
    shape = npimg.shape
    return {"hash": hashimage(image), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class __magic_name__ ( unittest.TestCase ):
_lowerCAmelCase = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
_lowerCAmelCase = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def _A ( self : Union[str, Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int ):
lowerCAmelCase : List[str] = MaskGenerationPipeline(model=lowerCamelCase__ , image_processor=lowerCamelCase__ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _A ( self : Any , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[int] ):
pass
@require_tf
@unittest.skip('''Image segmentation not implemented in TF''' )
def _A ( self : Optional[int] ):
pass
@slow
@require_torch
def _A ( self : Optional[int] ):
lowerCAmelCase : Dict = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' )
lowerCAmelCase : Optional[Any] = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=2_5_6 )
# Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['''masks'''] ):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_4_4_4},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_2_1},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_1_6_7},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_1_3_2},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_0_5_3},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_9_6_7},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_9_3},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_9_0_9},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_8_7_9},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_8_3_4},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_7_1_6},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_6_1_2},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_5_9_9},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_5_5_2},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_5_3_2},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_5_1_6},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_4_9_9},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_4_8_3},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_4_6_4},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_4_3},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_4_3},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_4_0_8},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_3_3_5},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_3_2_6},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_2_6_2},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8_9_9_9},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8_9_8_6},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8_9_8_4},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8_8_7_3},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8_8_7_1}
] , )
# fmt: on
@require_torch
@slow
def _A ( self : Any ):
lowerCAmelCase : List[str] = '''facebook/sam-vit-huge'''
lowerCAmelCase : List[str] = pipeline('''mask-generation''' , model=lowerCamelCase__ )
lowerCAmelCase : List[Any] = image_segmenter(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=2_5_6 )
# Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['''masks'''] ):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_4_4_4},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_2_1_0},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_1_6_7},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_1_3_2},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_0_5_3},
] , )
| 348
| 0
|
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
UpperCamelCase = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
UpperCamelCase = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
UpperCamelCase = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
UpperCamelCase = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
UpperCamelCase = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
UpperCamelCase = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
UpperCamelCase = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
UpperCamelCase = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
UpperCamelCase = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
UpperCamelCase = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
UpperCamelCase = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
UpperCamelCase = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
UpperCamelCase = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
UpperCamelCase = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
UpperCamelCase = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
UpperCamelCase = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
UpperCamelCase = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
UpperCamelCase = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
UpperCamelCase = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
UpperCamelCase = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
UpperCamelCase = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
UpperCamelCase = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
UpperCamelCase = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
UpperCamelCase = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
UpperCamelCase = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
UpperCamelCase = ''''''
UpperCamelCase = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
UpperCamelCase = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
UpperCamelCase = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def _a ( lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
assert ReadMe.from_string(lowerCamelCase__ , lowerCamelCase__ ).to_dict() == expected_dict
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def _a ( lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
    with pytest.raises(ValueError , match=re.escape(expected_error.format(path='root' ) ) ):
lowerCamelCase_ : List[str] = ReadMe.from_string(lowerCamelCase__ , lowerCamelCase__ )
readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def _a ( lowerCamelCase__ , lowerCamelCase__ ) -> Any:
    with pytest.raises(ValueError , match=re.escape(expected_error.format(path='root' ) ) ):
ReadMe.from_string(lowerCamelCase__ , lowerCamelCase__ )
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def _a ( lowerCamelCase__ ) -> Tuple:
ReadMe.from_string(lowerCamelCase__ , lowerCamelCase__ , suppress_parsing_errors=lowerCamelCase__ )
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def _a ( lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase_ : Tuple = Path(lowerCamelCase__ ) / 'README.md'
with open(lowerCamelCase__ , 'w+' ) as readme_file:
readme_file.write(lowerCamelCase__ )
lowerCamelCase_ : Optional[Any] = ReadMe.from_readme(lowerCamelCase__ , lowerCamelCase__ ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def _a ( lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase_ : Optional[int] = Path(lowerCamelCase__ ) / 'README.md'
with open(lowerCamelCase__ , 'w+' ) as readme_file:
readme_file.write(lowerCamelCase__ )
lowerCamelCase_ : Any = expected_error.format(path=lowerCamelCase__ )
        with pytest.raises(ValueError , match=re.escape(lowerCamelCase__ ) ):
lowerCamelCase_ : Any = ReadMe.from_readme(lowerCamelCase__ , lowerCamelCase__ )
readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def _a ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase_ : Tuple = Path(lowerCamelCase__ ) / 'README.md'
with open(lowerCamelCase__ , 'w+' ) as readme_file:
readme_file.write(lowerCamelCase__ )
lowerCamelCase_ : Optional[int] = expected_error.format(path=lowerCamelCase__ )
        with pytest.raises(ValueError , match=re.escape(lowerCamelCase__ ) ):
ReadMe.from_readme(lowerCamelCase__ , lowerCamelCase__ )
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def _a ( lowerCamelCase__ ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase_ : Tuple = Path(lowerCamelCase__ ) / 'README.md'
with open(lowerCamelCase__ , 'w+' ) as readme_file:
readme_file.write(lowerCamelCase__ )
ReadMe.from_readme(lowerCamelCase__ , lowerCamelCase__ , suppress_parsing_errors=lowerCamelCase__ )
| 714
|
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
)
| 144
| 0
|
'''simple docstring'''
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    # Sort items by value/weight ratio, best ratio first (greedy choice).
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    # k = number of whole items that fit within capacity w.
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
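# Illustrative check (values are hypothetical, not from the original file):
# frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0 -- items 1 and 2
# are taken whole, and (50 - 30)/30 of item 3 fills the remaining capacity.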
if __name__ == "__main__":
import doctest
doctest.testmod()
| 50
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
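# Usage sketch (hypothetical values): `attribute_map` lets the generic config
# names resolve to the GPT-2 style fields, e.g.
#   config = GPTBigCodeConfig(n_embd=256, n_layer=4, n_head=4)
#   assert config.hidden_size == 256 and config.num_hidden_layers == 4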
| 50
| 1
|
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1000000000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
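# Why the identity holds (sketch): for prime p and b not divisible by p,
# Fermat's little theorem gives b**(p-1) % p == 1, so b**(p-2) % p is the
# modular inverse of b, e.g. (10 * binary_exponentiation(10, 699, 701)) % 701 == 1.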
| 709
|
def greatest_common_divisor(a: int, b: int) -> int:
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main():
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")
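# Quick sanity checks (illustrative, not part of the original script):
# greatest_common_divisor(24, 40) == 8 == gcd_by_iterative(24, 40), and a zero
# argument is handled too: greatest_common_divisor(0, 7) == 7.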
if __name__ == "__main__":
main()
| 326
| 0
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
    ".aiff",
    ".au",
    ".avr",
    ".caf",
    ".flac",
    ".htk",
    ".svx",
    ".mat4",
    ".mat5",
    ".mpc2k",
    ".ogg",
    ".paf",
    ".pvf",
    ".raw",
    ".rf64",
    ".sd2",
    ".sds",
    ".ircam",
    ".voc",
    ".w64",
    ".wav",
    ".nist",
    ".wavex",
    ".wve",
    ".xi",
    ".mp3",
    ".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
| 12
|
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 677
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)

        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)

        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
| 605
|
'''simple docstring'''
from math import ceil, sqrt
def solution(limit: int = 1000000) -> int:
    """Project Euler 173: count square laminae that use up to `limit` tiles."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer
if __name__ == "__main__":
print(f'{solution() = }')
| 605
| 1
|
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict (e.g. pytorch_model.bin) to fp16."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
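# Example CLI invocation via fire (illustrative; the script filename is hypothetical):
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model_fp16.bin
# Omitting --save_path converts the checkpoint in place.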
if __name__ == "__main__":
fire.Fire(convert)
| 381
|
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    # Extended Euclidean algorithm: the invariant u1*a + u2*m == u3 holds
    # throughout, so u1 is the inverse once u3 reaches gcd(a, m) == 1.
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
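# Example (illustrative): find_mod_inverse(7, 26) == 15, since (7 * 15) % 26 == 1
# and gcd(7, 26) == 1, so the inverse exists.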
| 381
| 1
|
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 444
|
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True

        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 444
| 1
|
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launch helper's command line options."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
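# Typical invocation (illustrative):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
# The wrapped script must expose an `_mp_fn(index)` entry point for xmp.spawn to call.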
if __name__ == "__main__":
main()
| 41
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
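# Shape sketch (assuming batch size b and sequence length s): encoder_input_tokens
# is an integer tensor of shape (b, s); forward returns the encoded hidden states
# of shape (b, s, d_model) together with the unchanged encoder_inputs_mask.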
| 558
| 0
|
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]

    result.reverse()
    return result
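# Worked toy example (the classic weather/health HMM; values illustrative):
# observations = ["normal", "cold", "dizzy"], states = ["Healthy", "Fever"],
# initial = {"Healthy": 0.6, "Fever": 0.4},
# transitions = {"Healthy": {"Healthy": 0.7, "Fever": 0.3},
#                "Fever": {"Healthy": 0.4, "Fever": 0.6}},
# emissions = {"Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#              "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}}
# viterbi(observations, states, initial, transitions, emissions)
# -> ["Healthy", "Healthy", "Fever"]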
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 706
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 25
| 0
|
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """Circular FIFO queue backed by a fixed ring of doubly linked nodes."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # Close the ring: the last node points back to the first.
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return

        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
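# Minimal usage sketch (illustrative): the queue wraps a fixed ring of nodes, so
# enqueue/dequeue are O(1), and "full" means rear.next has caught up with front.
#   queue = CircularQueueLinkedList(initial_capacity=2)
#   queue.enqueue("a"); queue.enqueue("b")
#   queue.dequeue()  # -> "a"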
if __name__ == "__main__":
import doctest
doctest.testmod()
| 327
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
"SwinBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
"TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSwinForImageClassification",
"TFSwinForMaskedImageModeling",
"TFSwinModel",
"TFSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 327
| 1
|
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __lowercase ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {}
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
def lowerCAmelCase ( self ):
__UpperCamelCase : int = UperNetModelTester(self )
__UpperCamelCase : Dict = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=3_7 )
def lowerCAmelCase ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase ( self ):
return
def lowerCAmelCase ( self ):
__UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase : Any = model_class(_lowerCamelCase )
__UpperCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase : Union[str, Any] = [*signature.parameters.keys()]
__UpperCamelCase : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def lowerCAmelCase ( self ):
__UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCamelCase )
@unittest.skip(reason='UperNet does not use inputs_embeds' )
def lowerCAmelCase ( self ):
pass
@unittest.skip(reason='UperNet does not support input and output embeddings' )
def lowerCAmelCase ( self ):
pass
@unittest.skip(reason='UperNet does not have a base model' )
def lowerCAmelCase ( self ):
pass
@unittest.skip(reason='UperNet does not have a base model' )
def lowerCAmelCase ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def lowerCAmelCase ( self ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCAmelCase ( self ):
pass
def lowerCAmelCase ( self ):
def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
__UpperCamelCase : List[str] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
__UpperCamelCase : Union[str, Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
__UpperCamelCase : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCamelCase : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase : Tuple = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCamelCase : Tuple = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def lowerCAmelCase ( self ):
__UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : List[Any] = _config_zero_init(_lowerCamelCase )
__UpperCamelCase : List[str] = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
__UpperCamelCase : Tuple = model_class(config=_lowerCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We verify our results on an image from the ADE20k validation set
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
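# --- Editor's usage sketch (not part of the original test file) ---
# A minimal, hedged example of the inference flow the integration tests above
# exercise. The checkpoint name comes from the tests; the rest is illustrative.
def _example_upernet_inference():  # hypothetical helper, never called by the tests
    import torch
    from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

    processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny")
    inputs = processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # (batch, num_labels, height, width)
    return logits.argmax(dim=1)  # per-pixel predicted class map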
'''Convert a Reformer checkpoint from trax to PyTorch.'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key, torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query, torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.key, torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm, torch.tensor(layer_norm_1_weight), torch.tensor(layer_norm_1_bias), )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weights
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm, torch.tensor(layer_norm_2_weight), torch.tensor(layer_norm_2_bias), )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense, torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(), torch.tensor(inter_dense_bias), )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense, torch.tensor(out_dense_weight).transpose(0, 1).contiguous(), torch.tensor(out_dense_bias), )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings), )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings.weights[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm, torch.tensor(layer_norm_out_weight), torch.tensor(layer_norm_out_bias), )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder, torch.tensor(output_embed_weights).transpose(0, 1).contiguous(), torch.tensor(output_embed_bias), )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise the PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save the PyTorch model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        '''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the trax model pickle (.pkl) file.'''
    )
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
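# --- Editor's usage sketch (not part of the original script) ---
# Hedged example of invoking the converter above from a shell, assuming the
# script is saved under its upstream name; all file paths are hypothetical
# placeholders, not values from the source.
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path ./reformer_trax_weights.pkl \
#       --config_file ./reformer_config.json \
#       --pytorch_dump_path ./pytorch_model.bin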
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the dataset into features and target; "data" holds the features.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the Iris dataset and split it into train and test sets.
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the normalized confusion matrix of the classifier on the test set
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true", )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
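# --- Editor's usage sketch (not part of the original script) ---
# A small, hedged extension of main() above: score the fitted classifier on a
# held-out split instead of only plotting the confusion matrix. It reuses the
# functions defined above; accuracy_score is standard scikit-learn.
def _example_accuracy() -> float:  # hypothetical helper
    from sklearn.metrics import accuracy_score

    features, targets = data_handling(load_iris())
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)
    classifier = xgboost(x_train, y_train)
    return accuracy_score(y_test, classifier.predict(x_test))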
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/paste/tweak a parlai checkpoint's weights into the HF Blenderbot structure."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
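# --- Editor's usage sketch (not part of the original script) ---
# Hedged example invocation, assuming the script is saved under its upstream
# name; the checkpoint and config paths are hypothetical placeholders.
#
#   python convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py \
#       --src_path ./blenderbot-model.bin \
#       --save_dir ./hf_blenderbot \
#       --hf_config_json ./blenderbot-3b-config.json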
'''A circular FIFO queue backed by a fixed-capacity doubly linked list.'''
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return

        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception('''Empty Queue''')

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception('''Full Queue''')


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
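# --- Editor's usage sketch (not part of the original module) ---
# A short, hedged demonstration of the queue above: capacity 2, FIFO order is
# preserved, and a freed slot is reused after a dequeue.
def _example_circular_queue() -> None:  # hypothetical helper
    queue = CircularQueueLinkedList(initial_capacity=2)
    queue.enqueue("a")
    queue.enqueue("b")
    assert queue.first() == "a"
    assert queue.dequeue() == "a"
    queue.enqueue("c")  # reuses the slot freed by the dequeue
    assert queue.dequeue() == "b"
    assert queue.dequeue() == "c"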
'''A Fenwick tree (binary indexed tree) supporting point updates and range maximum queries.'''


class MaxFenwickTree:
    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        """Set arr[index] to value and repair every tree node whose range covers it."""
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # tree[index] covers only [index, index]
                self.tree[index] = value
            else:
                # tree[index] covers [current_left_border, index]; recompute its maximum
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Return the maximum over the half-open range [left, right)."""
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
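# --- Editor's worked example (not part of the original module) ---
# Hedged sketch of the bit tricks above on index 5 (0b101):
#   get_next(5) = 5 | 6 = 7       -> next tree node whose range covers index 5
#   get_prev(5) = (5 & 6) - 1 = 3 -> tree[5] covers the range [4, 5]
def _example_max_fenwick() -> None:  # hypothetical helper
    tree = MaxFenwickTree(8)
    tree.update(2, 10)
    tree.update(5, 7)
    assert tree.query(0, 8) == 10  # max over [0, 8)
    assert tree.query(3, 8) == 7   # max over [3, 8)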
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
a_ = re.compile(r"""^(?P<major>\d+)""" r"""\.(?P<minor>\d+)""" r"""\.(?P<patch>\d+)$""")
@total_ordering
@dataclass
class A_:
"""simple docstring"""
a_ : str
a_ : Optional[str] = None
a_ : Optional[Union[str, int]] = None
a_ : Optional[Union[str, int]] = None
a_ : Optional[Union[str, int]] = None
def _lowerCAmelCase ( self ):
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = _str_to_version_tuple(self.version_str )
def __repr__( self ):
return F"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"
@property
def _lowerCAmelCase ( self ):
return self.major, self.minor, self.patch
def _lowerCAmelCase ( self , A ):
if isinstance(A , A ):
return Version(A )
elif isinstance(A , A ):
return other
raise TypeError(F"{other} (type {type(A )}) cannot be compared to version." )
def __eq__( self , A ):
try:
_lowerCamelCase : Dict = self._validate_operand(A )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self , A ):
_lowerCamelCase : Any = self._validate_operand(A )
return self.tuple < other.tuple
def __hash__( self ):
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def _lowerCAmelCase ( cls , A ):
_lowerCamelCase : Optional[Any] = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def _lowerCAmelCase ( self ):
return self.version_str
def UpperCAmelCase_ ( __a : Any ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = _VERSION_REG.match(__a )
if not res:
raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits." )
return tuple(int(__a ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] )
def UpperCAmelCase_ ( __a : List[str] ):
'''simple docstring'''
return ".".join(str(__a ) for v in version_tuple )
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            'num_inference_steps',
            'generator',
            'latents',
            'return_dict',
            'callback',
            'callback_steps',
        ] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'), up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'), cross_attention_dim=32, attention_head_dim=4, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'pt',
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs['output_type'] = 'np'
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3E-3)

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1E-2)

    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.')
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.')
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.')
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy')
        pipe = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b')
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to('cuda')

        prompt = 'Spiderman is surfing'
        generator = torch.Generator(device='cpu').manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type='pt').frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5E-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy')
        pipe = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b')
        pipe = pipe.to('cuda')

        prompt = 'Spiderman is surfing'
        generator = torch.Generator(device='cpu').manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type='pt').frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5E-2
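# --- Editor's usage sketch (not part of the original test file) ---
# Hedged example of driving the pipeline exercised above; the prompt and step
# count mirror the slow tests, and a CUDA device is assumed.
def _example_text_to_video():  # hypothetical helper, never called by the tests
    import torch
    from diffusers import TextToVideoSDPipeline

    pipe = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b')
    pipe = pipe.to('cuda')
    generator = torch.Generator(device='cpu').manual_seed(0)
    frames = pipe('Spiderman is surfing', generator=generator, num_inference_steps=25, output_type='np').frames
    return frames  # video frames as numpy arrays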
"""simple docstring"""
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there are a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply what we learned previously. First we check that in the current board
        # (possible_board) there is no other queen in the same column, because if there
        # is it means there is a collision in vertical. Then we apply the two formulas
        # we learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify that the results of these two formulas do not exist in their
        # variables respectively. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any of these are True it means there is a collision so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col], [*diagonal_right_collisions, row - col], [*diagonal_left_collisions, row + col], boards, n, )
def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print('''''')

    print(len(boards), '''solutions were found.''')
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
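# --- Editor's worked example (not part of the original module) ---
# Hedged illustration of the two diagonal formulas used above, for n = 4.
# Queens at (row, col) = (0, 1) and (1, 3) share no diagonal:
#   row - col: 0 - 1 = -1 vs 1 - 3 = -2  (distinct -> no 45 degree collision)
#   row + col: 0 + 1 =  1 vs 1 + 3 =  4  (distinct -> no 135 degree collision)
# Queens at (0, 1) and (1, 2) do collide: row - col is -1 for both, so they
# sit on the same 45 degree diagonal and the search skips that placement.
def _example_n_queens() -> None:  # hypothetical helper
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, 4)
    assert len(boards) == 2  # the classic 4-queens puzzle has exactly two solutions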
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=['politics', 'health'])
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier('Who are you voting for in 2020?', candidate_labels='politics')
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})

        # No kwarg
        outputs = classifier('Who are you voting for in 2020?', ['politics'])
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})

        outputs = classifier('Who are you voting for in 2020?', candidate_labels=['politics'])
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})

        outputs = classifier('Who are you voting for in 2020?', candidate_labels='politics, public health')
        self.assertEqual(
            outputs, {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'])), 1.0)

        outputs = classifier('Who are you voting for in 2020?', candidate_labels=['politics', 'public health'])
        self.assertEqual(
            outputs, {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'])), 1.0)

        outputs = classifier(
            'Who are you voting for in 2020?', candidate_labels='politics', hypothesis_template='This text is about {}')
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(['I am happy'], ['positive', 'negative'])
        self.assertEqual(
            outputs, [
                {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]}
                for i in range(1)
            ], )
        outputs = classifier(['I am happy', 'I am sad'], ['positive', 'negative'])
        self.assertEqual(
            outputs, [
                {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]}
                for i in range(2)
            ], )

        with self.assertRaises(ValueError):
            classifier('', candidate_labels='politics')

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels='politics')

        with self.assertRaises(ValueError):
            classifier('Who are you voting for in 2020?', candidate_labels='')

        with self.assertRaises(TypeError):
            classifier('Who are you voting for in 2020?', candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                'Who are you voting for in 2020?', candidate_labels='politics', hypothesis_template='Not formatting template', )

        with self.assertRaises(AttributeError):
            classifier(
                'Who are you voting for in 2020?', candidate_labels='politics', hypothesis_template=None, )

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {'ENTAIL': 0, 'NON-ENTAIL': 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            'zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='pt', )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            'Who are you voting for in 2020?' * 100, candidate_labels=['politics', 'public health', 'science'])
    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            'zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='pt', )
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(
            nested_simplify(outputs), {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['science', 'public health', 'politics'],
                'scores': [0.333, 0.333, 0.333],
            }, )
    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            'zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='tf', )
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(
            nested_simplify(outputs), {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['science', 'public health', 'politics'],
                'scores': [0.333, 0.333, 0.333],
            }, )
    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline('zero-shot-classification', model='roberta-large-mnli', framework='pt')
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(
            nested_simplify(outputs), {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['politics', 'public health', 'science'],
                'scores': [0.976, 0.015, 0.009],
            }, )
        outputs = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
            ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=True , )
self.assertEqual(
            nested_simplify(outputs) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
                '''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline('zero-shot-classification', model='roberta-large-mnli', framework='tf')
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(
            nested_simplify(outputs), {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['politics', 'public health', 'science'],
                'scores': [0.976, 0.015, 0.009],
            }, )
        outputs = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
            ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=True , )
self.assertEqual(
            nested_simplify(outputs) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
                '''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
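# --- Editor's usage sketch (not part of the original test file) ---
# Hedged example of the pipeline under test; the model name comes from the
# slow tests above, the labels are illustrative.
def _example_zero_shot():  # hypothetical helper, never called by the tests
    from transformers import pipeline

    classifier = pipeline('zero-shot-classification', model='roberta-large-mnli')
    result = classifier(
        'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
    return result['labels'][0]  # highest-scoring label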
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png").resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png").resize((512, 512))

        output = pipe(
            prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6, )
        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy")

        assert np.abs(expected_image - image).max() < 9e-2
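# --- Editor's usage sketch (not part of the original test file) ---
# Hedged sketch of the img2img + ControlNet flow exercised by test_canny above;
# the checkpoint names, image URLs, and prompt come from that test, while the
# standalone structure is illustrative.
def _example_controlnet_img2img():  # hypothetical helper, never called by the tests
    from diffusers import ControlNetModel, StableDiffusionControlNetImg2ImgPipeline
    from diffusers.utils import load_image

    controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
    pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet)
    pipe.enable_model_cpu_offload()
    image = load_image(
        "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png").resize((512, 512))
    control = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png").resize((512, 512))
    out = pipe("evil space-punk bird", image, control_image=control, num_inference_steps=50, strength=0.6, output_type="np")
    return out.images[0]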
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=True, activation_fn="gelu-approximate", num_embeds_ada_norm=1_000, norm_type="ada_norm_zero", norm_elementwise_affine=False, )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
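# --- Illustrative usage sketch (added; not part of the original test file) ---
# Minimal end-to-end DiT inference mirroring the slow tests above; the label
# string is an arbitrary ImageNet class name accepted by get_label_ids.
#
#   import torch
#   from diffusers import DiTPipeline
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256").to("cuda")
#   class_ids = pipe.get_label_ids(["white shark"])
#   generator = torch.manual_seed(0)
#   image = pipe(class_ids, generator=generator, num_inference_steps=25, output_type="np").images[0]
#   print(image.shape)  # (256, 256, 3)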
| 124
| 1
|
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999_999_999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999_999_999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)
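# --- Worked example (added for illustration; not part of the original module) ---
# Three made-up processes with arrival times [0, 1, 2] and burst times [4, 2, 1]:
# the shorter jobs preempt the first process, so under preemptive SJF
#   calculate_waitingtime([0, 1, 2], [4, 2, 1], 3)     -> [3, 0, 1]
#   calculate_turnaroundtime([4, 2, 1], 3, [3, 0, 1])  -> [7, 2, 2]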
if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
| 712
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
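# --- Illustrative usage sketch (added; not part of the original file) ---
# Shows the attribute_map indirection defined above: reading the mapped name
# "hidden_size" transparently returns d_model.
#
#   from transformers import Speech2Text2Config
#
#   config = Speech2Text2Config(decoder_layers=4)
#   assert config.num_hidden_layers == 4
#   assert config.hidden_size == config.d_model == 256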
| 222
| 0
|
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

SPIECE_UNDERLINE = "▁"


class T5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )
        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )
        return max_model_length
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]
    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens
    def _convert_token_to_id(self, token):
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
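# --- Illustrative usage sketch (added; not part of the original file) ---
# Demonstrates the sentinel-token machinery implemented above: with the
# default extra_ids=100, <extra_id_0> is mapped to the *last* id of the
# extended vocabulary (vocab_size - 1), <extra_id_1> to vocab_size - 2, etc.
#
#   from transformers import T5Tokenizer
#
#   tok = T5Tokenizer.from_pretrained("t5-small")
#   assert tok.convert_tokens_to_ids("<extra_id_0>") == tok.vocab_size - 1
#   assert "<extra_id_7>" in tok.get_sentinel_tokens()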
| 653
|
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" ,[2, -1] )
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}
    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
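# --- Illustrative usage sketch (added; not part of the original test file) ---
# How the API under test is used outside of pytest: joblibspark registers the
# "spark" joblib backend, and map_nested then fans the work out over Spark.
#
#   from datasets.parallel import parallel_backend
#   from datasets.utils.py_utils import map_nested
#
#   with parallel_backend("spark"):
#       results = map_nested(add_one, {"a": [1, 2], "b": [3, 4]}, num_proc=2)
#   # results == {"a": [2, 3], "b": [4, 5]}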
| 653
| 1
|
"""simple docstring"""
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_one_file(self):
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")

    def test_legacy_load_from_url(self):
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        trie.data
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
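# --- Illustrative usage sketch (added; not part of the original test file) ---
# The Trie above backs special-token splitting in slow tokenizers: added
# tokens are matched greedily, longest match first.
#
#   from transformers.tokenization_utils import Trie
#
#   trie = Trie()
#   trie.add("[CLS]")
#   trie.add("[SEP]")
#   print(trie.split("[CLS] hello [SEP]"))  # ["[CLS]", " hello ", "[SEP]"]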
| 100
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 100
| 1
|
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
pass
def lowercase__ ( self : List[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ , nn.Linear ) )
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def lowercase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE__ )
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ )
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE__ )
def lowercase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(SCREAMING_SNAKE_CASE__ ), BeitForMaskedImageModeling]:
continue
UpperCAmelCase_ = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.train()
UpperCAmelCase_ = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ = model(**SCREAMING_SNAKE_CASE__ ).loss
loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
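# --- Illustrative usage sketch (added; not part of the original test file) ---
# The same classification path the integration tests exercise, as a plain
# script; any RGB image can replace the COCO fixture used above ("cat.png"
# is a made-up file name).
#
#   import torch
#   from PIL import Image
#   from transformers import BeitForImageClassification, BeitImageProcessor
#
#   processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
#   model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])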
| 82
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A_ = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
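# --- Illustrative note (added; not part of the original file) ---
# With the lazy structure above, importing the package stays cheap: torch and
# sentencepiece are only imported when a guarded attribute is first accessed.
#
#   from transformers.models.plbart import PLBartConfig  # config only, no torch yet
#   from transformers.models.plbart import PLBartModel   # triggers the torch-backed module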
| 143
| 0
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV. Field names are reconstructed from the
    pd_read_csv_kwargs mapping below."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[str] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table):
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
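# --- Illustrative usage sketch (added; not part of the original file) ---
# This builder is what `load_dataset("csv", ...)` dispatches to; extra keyword
# arguments become CsvConfig fields and are forwarded to pandas.read_csv via
# pd_read_csv_kwargs. The file name below is a made-up example.
#
#   from datasets import load_dataset
#
#   ds = load_dataset("csv", data_files={"train": "my_data.csv"}, sep=";", skiprows=1)
#   print(ds["train"].features)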
| 220
|
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content


def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
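# ---------------------------------------------------------------------------
# Example: driving DownloadManager directly, outside of pytest. A minimal
# sketch: the URL and cache directory are placeholders, and the URL must be
# reachable for this to actually download anything.
import os

from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager

example_url = "https://example.com/file1.txt"  # placeholder, not from the tests

download_config = DownloadConfig(cache_dir=os.path.join("/tmp", "downloads"), use_etag=False)
dl_manager = DownloadManager(dataset_name="dummy", download_config=download_config)

# download() mirrors the shape of its input: str -> str, list -> list, dict -> dict
local_path = dl_manager.download(example_url)
print(local_path, dl_manager.downloaded_paths[example_url])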
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
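# ---------------------------------------------------------------------------
# Example driver for the three checks above. The file never defines a node
# type, so this minimal `ListNode` and the sample values are assumptions made
# for illustration only.
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def build_list(values):
    head = tail = None
    for val in values:
        node = ListNode(val)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head


if __name__ == "__main__":
    # is_palindrome rewires the list, so build a fresh one for each check
    print(is_palindrome(build_list([1, 2, 2, 1])))     # True
    print(is_palindrome_stack(build_list([1, 2, 3])))  # False
    print(is_palindrome_dict(build_list([1, 2, 1])))   # True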
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """
    A helper function to set a given tensor (parameter or buffer) of a module on a specific device (note that doing
    `param.to(device)` is not supported for quantized parameters).
    """
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    """
    Private method that recursively replaces the Linear layers of the given model with bnb quantized layers.
    """
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """
    Wraps the recursion above; by default the `lm_head` is kept in full precision.
    """
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model


def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use"
        " `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    """
    Utility to find module keys that should stay in full precision (e.g. the lm_head of causal LMs).
    """
    # Create a copy of the model and tie the weights, then check if it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
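# ---------------------------------------------------------------------------
# Example: wiring the helpers above together. Illustrative only: it assumes a
# CUDA build of `bitsandbytes` plus `accelerate` are installed, and the toy
# model below is invented for the sketch.
import torch.nn as nn

from transformers import BitsAndBytesConfig

toy_model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 16))

# load_in_8bit selects the "llm_int8" quantization method handled above
bnb_config = BitsAndBytesConfig(load_in_8bit=True)

# every eligible nn.Linear child is swapped for bnb.nn.Linear8bitLt
toy_model = replace_with_bnb_linear(toy_model, quantization_config=bnb_config)
print(toy_model)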
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class A ( a ):
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def __lowerCAmelCase ( self ) -> Tuple:
with self.assertRaises(snake_case_ ):
_a = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def __lowerCAmelCase ( self ) -> List[Any]:
with self.assertRaises(snake_case_ ):
_a = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) )
def __lowerCAmelCase ( self ) -> Any:
_a = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def __lowerCAmelCase ( self ) -> List[str]:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_a = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) )
def __lowerCAmelCase ( self ) -> Any:
_a = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def __lowerCAmelCase ( self ) -> Any:
_a = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) )
self.assertEqual(arr.type , pa.string() )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def __lowerCAmelCase ( self ) -> Tuple:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_a = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def __lowerCAmelCase ( self ) -> Any:
_a = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def __lowerCAmelCase ( self ) -> Union[str, Any]:
import PIL.Image
_a = PIL.Image.fromarray(np.arange(1_0 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"datasets.arrow_writer.cast_to_python_objects" , side_effect=snake_case_ ) as mock_cast_to_python_objects:
_a = pa.array(TypedSequence([{"path": None, "bytes": B"image_bytes"}, pil_image] , type=Image() ) )
_a , _a = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("optimize_list_casting" , snake_case_ )
self.assertFalse(kwargs["optimize_list_casting"] )
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : int ):
_a = pa.BufferReader(lowerCamelCase__ ) if isinstance(lowerCamelCase__, pa.Buffer ) else pa.memory_map(lowerCamelCase__ )
_a = pa.ipc.open_stream(lowerCamelCase__ )
_a = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10] )
@pytest.mark.parametrize(
"fields", [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : Dict ):
_a = pa.BufferOutputStream()
_a = pa.schema(lowerCamelCase__ ) if fields else None
with ArrowWriter(stream=lowerCamelCase__, schema=lowerCamelCase__, writer_batch_size=lowerCamelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_a = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(lowerCamelCase__, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _lowercase ( ):
_a = pa.BufferOutputStream()
_a = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
with ArrowWriter(stream=lowerCamelCase__, features=lowerCamelCase__ ) as writer:
writer.write({"labels": 0} )
writer.write({"labels": 1} )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
_a = pa.BufferReader(output.getvalue() )
_a = pa.ipc.open_stream(lowerCamelCase__ )
_a = f.read_all()
_a = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(lowerCamelCase__ )
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10] )
def _lowercase ( lowerCamelCase__ : Optional[int] ):
_a = pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCamelCase__, writer_batch_size=lowerCamelCase__, hash_salt="split_name", check_duplicates=lowerCamelCase__, ) as writer:
with pytest.raises(lowerCamelCase__ ):
writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2] )
_a , _a = writer.finalize()
@pytest.mark.parametrize("writer_batch_size", [None, 2, 10] )
def _lowercase ( lowerCamelCase__ : Optional[Any] ):
_a = pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCamelCase__, writer_batch_size=lowerCamelCase__, hash_salt="split_name", check_duplicates=lowerCamelCase__, ) as writer:
with pytest.raises(lowerCamelCase__ ):
writer.write({"col_1": "foo", "col_2": 1}, key=10 )
writer.write({"col_1": "bar", "col_2": 2}, key=10 )
_a , _a = writer.finalize()
@pytest.mark.parametrize("writer_batch_size", [None, 2, 10] )
def _lowercase ( lowerCamelCase__ : Optional[Any] ):
_a = pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCamelCase__, writer_batch_size=lowerCamelCase__, hash_salt="split_name", check_duplicates=lowerCamelCase__, ) as writer:
writer.write({"col_1": "foo", "col_2": 1}, key=1 )
writer.write({"col_1": "bar", "col_2": 2}, key=2 )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10] )
@pytest.mark.parametrize(
"fields", [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Optional[Any] ):
_a = pa.BufferOutputStream()
_a = pa.schema(lowerCamelCase__ ) if fields else None
with ArrowWriter(stream=lowerCamelCase__, schema=lowerCamelCase__, writer_batch_size=lowerCamelCase__ ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_a = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(lowerCamelCase__, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10] )
@pytest.mark.parametrize(
"fields", [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : Any ):
_a = pa.BufferOutputStream()
_a = pa.schema(lowerCamelCase__ ) if fields else None
with ArrowWriter(stream=lowerCamelCase__, schema=lowerCamelCase__, writer_batch_size=lowerCamelCase__ ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_a = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(lowerCamelCase__, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10] )
@pytest.mark.parametrize(
"fields", [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : List[str] ):
_a = pa.BufferOutputStream()
_a = pa.schema(lowerCamelCase__ ) if fields else None
with ArrowWriter(stream=lowerCamelCase__, schema=lowerCamelCase__, writer_batch_size=lowerCamelCase__ ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_a = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(lowerCamelCase__, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _lowercase ( ):
with tempfile.TemporaryDirectory() as tmp_dir:
_a = {"col_1": pa.string(), "col_2": pa.intaa()}
_a = os.path.join(lowerCamelCase__, "test.arrow" )
with ArrowWriter(path=lowerCamelCase__, schema=pa.schema(lowerCamelCase__ ) ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(lowerCamelCase__, metadata=writer._schema.metadata )
_check_output(lowerCamelCase__, 1 )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
if pa.types.is_list(lowerCamelCase__ ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Dict ):
if isinstance(lst[0], lowerCamelCase__ ):
change_first_primitive_element_in_list(lst[0], lowerCamelCase__ )
else:
_a = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Dict ):
_a = pa.array(TypedSequence(lowerCamelCase__, optimized_int_type=lowerCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype", [
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
], )
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : Optional[Any], lowerCamelCase__ : Tuple ):
# in range
_a = pa.array(OptimizedTypedSequence(lowerCamelCase__, col=lowerCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_a = copy.deepcopy(lowerCamelCase__ )
_a = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(lowerCamelCase__, lowerCamelCase__ )
_a = pa.array(OptimizedTypedSequence(lowerCamelCase__, col=lowerCamelCase__ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("raise_exception", [False, True] )
def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Any ):
_a = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=lowerCamelCase__ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def _lowercase ( lowerCamelCase__ : Tuple ):
_a = "mock://dataset-train.arrow"
with ArrowWriter(path=lowerCamelCase__, storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs, type(lowerCamelCase__ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(lowerCamelCase__ )
def _lowercase ( ):
_a = pa.BufferOutputStream()
with ParquetWriter(stream=lowerCamelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_a = pa.BufferReader(output.getvalue() )
_a = pq.read_table(lowerCamelCase__ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True] )
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Any ):
import PIL.Image
_a = str(tmp_path / "test_image_rgb.jpg" )
PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uinta ) ).save(lowerCamelCase__, format="png" )
_a = pa.BufferOutputStream()
with ParquetWriter(
stream=lowerCamelCase__, features=Features({"image": Image()} ), embed_local_files=lowerCamelCase__ ) as writer:
writer.write({"image": image_path} )
writer.finalize()
_a = pa.BufferReader(output.getvalue() )
_a = pq.read_table(lowerCamelCase__ )
_a = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"], lowerCamelCase__ )
with open(lowerCamelCase__, "rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def _lowercase ( ):
_a = pa.schema([pa.field("col_1", pa.string(), nullable=lowerCamelCase__ )] )
_a = pa.BufferOutputStream()
with ArrowWriter(stream=lowerCamelCase__ ) as writer:
writer._build_writer(inferred_schema=lowerCamelCase__ )
assert writer._schema == pa.schema([pa.field("col_1", pa.string() )] )
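# ---------------------------------------------------------------------------
# Example: the write -> finalize -> read-back cycle these tests repeat, in one
# place. A minimal sketch; the column names are illustrative.
import pyarrow as pa

from datasets.arrow_writer import ArrowWriter

output = pa.BufferOutputStream()
with ArrowWriter(stream=output) as writer:
    writer.write({"col_1": "foo", "col_2": 1})
    writer.write({"col_1": "bar", "col_2": 2})
    num_examples, num_bytes = writer.finalize()

# each flushed batch becomes one record batch in the Arrow stream
table = pa.ipc.open_stream(pa.BufferReader(output.getvalue())).read_all()
print(num_examples, num_bytes, table.to_pydict())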
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")

MAPPING_QUANTIZER = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
MAPPING_ENCODER = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
MAPPING_ENCODER_48K = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
MAPPING_DECODER = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
MAPPING_DECODER_48K = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")


def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
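# ---------------------------------------------------------------------------
# Example: how the two wildcard forms in IGNORE_KEYS-style patterns behave.
# The key names below are invented for illustration.
print(should_ignore("decoder.model.0.conv.conv", ["decoder.*"]))                # True: "decoder.*" is a prefix match
print(should_ignore("encoder.model.1.block.1.conv.conv", ["encoder.*.conv"]))  # True: prefix and suffix both occur
print(should_ignore("quantizer.vq.layers.0._codebook.embed", ["decoder.*"]))   # False: no pattern matches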
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2_048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel

do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--repo_path",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    config_parameters_to_change = {
"image_size": "sample_size",
"num_res_blocks": "layers_per_block",
"block_channels": "block_out_channels",
"down_blocks": "down_block_types",
"up_blocks": "up_block_types",
"downscale_freq_shift": "freq_shift",
"resnet_num_groups": "norm_num_groups",
"resnet_act_fn": "act_fn",
"resnet_eps": "norm_eps",
"num_head_channels": "attention_head_dim",
}
    key_parameters_to_change = {
"time_steps": "time_proj",
"mid": "mid_block",
"downsample_blocks": "down_blocks",
"upsample_blocks": "up_blocks",
}
    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
    config = dict(model.config)
    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    # remap the first dotted component and keep the rest of the path
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
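# ---------------------------------------------------------------------------
# Example: the key-renaming pattern from the weights loop above, isolated.
# The keys below are invented for illustration.
key_map = {"mid": "mid_block", "downsample_blocks": "down_blocks"}
old_keys = ["mid.attn.weight", "downsample_blocks.0.conv.bias", "time_embed.weight"]

renamed = {}
for param_key in old_keys:
    head, *rest = param_key.split(".")
    # only the first dotted component is remapped; unknown prefixes pass through
    renamed[".".join([key_map.get(head, head)] + rest)] = param_key

print(renamed)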
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs, outputs):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
        hood we tracked all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
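# ---------------------------------------------------------------------------
# Example: Tracker in isolation. The toy network is invented; only leaf
# modules that actually hold parameters survive the `parametrized` filter.
import torch
import torch.nn as nn

toy_net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.BatchNorm2d(8))
leaves = Tracker(toy_net)(torch.randn(1, 3, 16, 16)).parametrized
print(leaves)  # [Conv2d(...), BatchNorm2d(...)] -- the ReLU has no state_dict entries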
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
"""resnet18""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ),
"""resnet26""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet34""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ),
"""resnet50""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet101""": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet152""": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
}
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
    args = parser.parse_args()

    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''',
'''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''',
'''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''microsoft/speecht5_asr''': 1024,
'''microsoft/speecht5_tts''': 1024,
'''microsoft/speecht5_vc''': 1024,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = ['''input_ids''', '''attention_mask''']
def __init__( self , _lowerCAmelCase , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> None:
'''simple docstring'''
lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
lowercase = vocab_file
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCAmelCase )
@property
def _a ( self ) -> List[Any]:
'''simple docstring'''
return self.sp_model.get_piece_size()
def _a ( self ) -> str:
'''simple docstring'''
lowercase = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = self.__dict__.copy()
lowercase = None
return state
def __setstate__( self , _lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowercase = {}
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase )
def _a ( self , _lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
return self.sp_model.piece_to_id(_lowerCAmelCase )
def _a ( self , _lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase = self.sp_model.IdToPiece(_lowerCAmelCase )
return token
def _a ( self , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = []
lowercase = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_lowerCAmelCase ) + token
lowercase = []
else:
current_sub_tokens.append(_lowerCAmelCase )
out_string += self.sp_model.decode(_lowerCAmelCase )
return out_string.strip()
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
lowercase = [1]
if token_ids_a is None:
return ([0] * len(_lowerCAmelCase )) + suffix_ones
return ([0] * len(_lowerCAmelCase )) + ([0] * len(_lowerCAmelCase )) + suffix_ones
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCAmelCase , """wb""" ) as fi:
lowercase = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (out_vocab_file,)
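# ---------------------------------------------------------------------------
# Example usage of the tokenizer above. A sketch: it assumes `sentencepiece`
# is installed and that the Hub checkpoint (one of the names from the map at
# the top of this file) is reachable.
from transformers import SpeechT5Tokenizer

tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")

encoded = tokenizer("Hello world")
print(encoded["input_ids"])                    # character-level ids, with </s> appended
print(tokenizer.decode(encoded["input_ids"]))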
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"""GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXForCausalLM""",
"""GPTNeoXForQuestionAnswering""",
"""GPTNeoXForSequenceClassification""",
"""GPTNeoXForTokenClassification""",
"""GPTNeoXLayer""",
"""GPTNeoXModel""",
"""GPTNeoXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
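# ---------------------------------------------------------------------------
# Example: a stripped-down version of the _LazyModule idea, to show why the
# init files above only build `_import_structure` up front. A sketch with
# invented module/symbol names.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each public symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr):
        # only runs on first access; the heavy import happens here, lazily
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(self._symbol_to_module[attr])
        return getattr(submodule, attr)


# e.g. LazyModule("mypkg", {"mypkg.modeling": ["MyModel"]}) would import
# mypkg.modeling only when `.MyModel` is first touched.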
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def UpperCAmelCase ( lowercase__ : List[str] , lowercase__ : List[Any] , lowercase__ : Dict , lowercase__ : str ):
'''simple docstring'''
a__ = s.rsplit(lowercase__ , lowercase__ )
return new.join(lowercase__ )
def UpperCAmelCase ( lowercase__ : Dict ):
'''simple docstring'''
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def upgrade_state_dict(state_dict):
    upgrade = {}
    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")
        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")
        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)
        upgrade[key] = value.float()
    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    """Copy/paste/tweak the DALL-E encoder weights into the FLAVA image codebook layout."""
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 711
|
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 412
| 0
|
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 416
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
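
# Added sanity check (illustrative): with the default conv strides
# (5, 2, 2, 2, 2, 2, 2) the feature encoder downsamples the waveform by
# 5 * 2**6 = 320 input samples per logit frame.
if __name__ == "__main__":
    assert UniSpeechConfig().inputs_to_logits_ratio == 320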
| 326
| 0
|
'''simple docstring'''
def longest_distance(graph):
    """Kahn's topological sort with relaxation: prints the vertex count of the longest path in a DAG."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
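
# Added hand-checkable example: the relaxation above counts vertices on the
# longest path, so a simple chain 0 -> 1 -> 2 prints 3.
longest_distance({0: [1], 1: [2], 2: []})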
| 630
|
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        # max_weight = -15
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')

    def test_negative_weight_value(self):
        # weight = [2, -4, 6, 8, 10, 12]
        self.assertRaisesRegex(ValueError, 'Weight can not be negative.')

    def test_negative_profit_value(self):
        # profit = [10, -20, 30, 40, 50, 60]
        self.assertRaisesRegex(ValueError, 'Profit can not be negative.')

    def test_null_max_weight(self):
        # max_weight = None
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')

    def test_unequal_list_length(self):
        # profit and weight lists of different lengths
        self.assertRaisesRegex(
            IndexError, 'The length of profit and weight must be same.')
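
# Added hedged sketch (NOT the module under test): a minimal fractional-knapsack
# greedy consistent with the assertions above. `calc_profit`'s real signature
# and error handling live in `knapsack.greedy_knapsack`; this re-implementation
# is an assumption for illustration only.
def _calc_profit_sketch(profit: list, weight: list, max_weight: int) -> float:
    if max_weight is None or max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if len(profit) != len(weight):
        raise IndexError("The length of profit and weight must be same.")
    # Take items by decreasing profit/weight ratio, splitting the last item.
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total, capacity = 0.0, max_weight
    for p, w in items:
        take = min(w, capacity)
        total += p * (take / w)
        capacity -= take
        if capacity == 0:
            break
    return total
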
if __name__ == "__main__":
unittest.main()
| 630
| 1
|
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
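
if __name__ == "__main__":
    # Added usage sketch (illustrative; downloads a real checkpoint): `audio` is
    # routed to the feature extractor and `text` to the tokenizer, with the
    # tokenized ids returned under "labels".
    import numpy as np

    processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
    batch = processor(audio=np.zeros(16000), sampling_rate=16000, text="hello world", return_tensors="pt")
    print(batch["input_features"].shape, batch["labels"])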
| 150
|
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 150
| 1
|
from math import isqrt
def is_prime(number: int) -> bool:
    """Trial division up to isqrt(number) suffices for primality here."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes of the form (n + 1)**3 - n**3 below `max_prime`."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count
if __name__ == "__main__":
print(f"{solution() = }")
| 716
|
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}
    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Walk parent pointers until reaching the representative of u_node's component."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])
    def set_component(self, u_node: int) -> None:
        """Refresh every node's component id after a merge."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Merge the smaller component into the larger one."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)
    def boruvka(self) -> None:
        """Borůvka's algorithm: repeatedly add each component's minimum-weight outgoing edge."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes

        print(f"""The total weight of the minimal spanning tree is: {mst_weight}""")
def test_vector() -> None:
    """Placeholder kept from the original file; the demo below exercises the class."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
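
if __name__ == "__main__":
    # Added illustrative run: a weighted triangle keeps its two lightest edges,
    # so the reported MST weight is 1 + 2 = 3.
    g = Graph(3)
    g.add_edge(0, 1, 1)
    g.add_edge(1, 2, 2)
    g.add_edge(0, 2, 3)
    g.boruvka()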
| 169
| 0
|
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            '<d>',
            '</d>',
            '<s>',
            '</s>',
            '</_>',
            '<unk>',
            '<pad>',
            '</n>',
            '我',
            '是',
            'C',
            'P',
            'M',
            'A',
            'n',
            't',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b')
        texts = '今天天气真好!'
        jieba_tokens = ['今天', '天气', '真', '好', '!']
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = '今天天气真好!'
        input_tokens = [tokenizer.bos_token] + tokens

        input_ids = [6, 9_802, 14_962, 2_082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)

        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
| 28
|
'''simple docstring'''
def solution(limit: int = 1_00_00_00) -> int:
    """Sieve-style computation of Euler's totient, summed over 2..limit."""
    phi = [i - 1 for i in range(limit + 1)]  # phi(p) = p - 1 when p is prime

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
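
# Added sanity check: phi(2..10) = 1, 2, 2, 4, 2, 6, 4, 6, 4, which sums to 31.
if __name__ == "__main__":
    assert solution(10) == 31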
| 28
| 1
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )
    def create_and_check_xlm_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_xlm_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_xlm_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_xlm_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_xlm_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_xlm_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_xlm_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            """feature-extraction""": XLMModel,
            """fill-mask""": XLMWithLMHeadModel,
            """question-answering""": XLMForQuestionAnsweringSimple,
            """text-classification""": XLMForSequenceClassification,
            """text-generation""": XLMWithLMHeadModel,
            """token-classification""": XLMForTokenClassification,
            """zero-shot""": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('Fast')
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)

        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)

    def _check_attentions_for_generate(self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions))
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions))

    def _check_hidden_states_for_generate(self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 718
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'tester'
        output_text = 'tester'
        return input_text, output_text
    @unittest.skip('MGP-STR always lower cases letters.')
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}'''):
                special_token = '[SPECIAL_TOKEN]'

                tokenizer.add_special_tokens({'cls_token': special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}'''):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(' ', ''), output_text)

    @unittest.skip('MGP-STR tokenizer only handles one sequence.')
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer')
    def test_pretokenized_inputs(self):
        pass
| 345
| 0
|
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    """Print the first- and second-order Shannon entropies of `text` and their difference."""
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(' ' + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(F'{round(-1 * my_fir_sum):.1f}')

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(F'{round(-1 * my_sec_sum):.1f}')

    # print the difference between them
    print(F'{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}')
def analyze_text(text: str) -> tuple[dict, dict]:
    """Count single characters and adjacent character pairs in `text`."""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main() -> None:
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
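
if __name__ == "__main__":
    # Added usage sketch: count the n-grams of a short string and print its
    # first/second-order entropy estimates.
    single, double = analyze_text("abracadabra")
    print(dict(single), dict(double))
    calculate_prob("abracadabra")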
| 94
|
LETTERS = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def main() -> None:
    message = input('''Enter message: ''')
    key = input('''Enter key [alphanumeric]: ''')
    mode = input('''Encrypt/Decrypt [e/d]: ''')

    if mode.lower().startswith('''e'''):
        mode = '''encrypt'''
        translated = encrypt_message(key, message)
    elif mode.lower().startswith('''d'''):
        mode = '''decrypt'''
        translated = decrypt_message(key, message)

    print(f'''\n{mode.title()}ed message:''')
    print(translated)
def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, '''encrypt''')


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, '''decrypt''')
def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main()
| 583
| 0
|
'''simple docstring'''
import os
from math import log10
def solution(data_file: str = "base_exp.txt") -> int:
    """Return the 1-based line number whose pair a,x maximises a**x, compared via x * log10(a)."""
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
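
# Added worked note: comparing a**x through x*log10(a) avoids forming huge
# integers; e.g. 2**11 = 2048 < 3**7 = 2187 and, consistently,
# 11*log10(2) ~ 3.31 < 7*log10(3) ~ 3.34.
assert (11 * log10(2) < 7 * log10(3)) == (2**11 < 3**7)
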
if __name__ == "__main__":
print(solution())
| 399
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 399
| 1
|
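# A minimal sketch (not part of the module above) of the idea behind _LazyModule:
# importing the package stays cheap because the heavy submodule import is
# deferred until an attribute is first resolved. The `LazyAttr` name is
# hypothetical, for illustration only.
import importlib


class LazyAttr:
    def __init__(self, module_name: str, attr: str) -> None:
        self._module_name = module_name
        self._attr = attr

    def resolve(self):
        # The real import happens only here, on first use.
        return getattr(importlib.import_module(self._module_name), self._attr)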
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}


class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", []) or []
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 489
|
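# A small, self-contained sketch of the fairseq/spm id alignment documented in
# the tokenizer above: the four control tokens are pinned to ids 0-3, and every
# real sentencepiece id is shifted by fairseq_offset = 1, so spm id 3 (",")
# surfaces as fairseq id 4.
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1


def spm_id_to_fairseq_id(spm_id: int) -> int:
    # spm id 0 is its own <unk>; map it to the pinned fairseq <unk> instead of shifting.
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]


assert spm_id_to_fairseq_id(3) == 4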
'''simple docstring'''
from collections.abc import Iterable
from typing import Generic, TypeVar
lowerCAmelCase_ : Optional[int] = TypeVar("_T")
class QueueByTwoStacks(Generic[_T]):
    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append

        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())

        if not self._stack2:
            raise IndexError("Queue is empty")

        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 489
| 1
|
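# A hedged usage sketch for QueueByTwoStacks above: each element crosses between
# the two stacks at most once, so n put/get operations cost O(n) in total
# (amortized O(1) each) even though a single get may move many elements.
queue = QueueByTwoStacks([10, 20])
queue.put(30)
assert queue.get() == 10  # FIFO order, despite the two LIFO stacks underneath
assert len(queue) == 2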
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.

        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 545
|
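# A hedged usage sketch for the genetic algorithm above: a short target should
# converge within a few generations, though the exact counts vary with the
# random seed.
generation, total_population, best = basic(
    "Hi!", list(" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz.,;!?"), debug=False
)
assert best == "Hi!"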
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mask2former"] = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 545
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 86
|
'''simple docstring'''
def merge_sort(collection: list) -> list:
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
if __name__ == "__main__":
_A = input("""Enter numbers separated by a comma:\n""").strip()
_A = [int(item) for item in user_input.split(""",""")]
print(*merge_sort(unsorted), sep=""",""")
| 158
| 0
|
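# A hedged usage sketch for merge_sort above: it repeatedly extracts the current
# minimum and maximum, so it runs in O(n^2) because of the min/max/remove scans,
# and note that it mutates the list passed in.
assert merge_sort([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]
assert merge_sort([]) == []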
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        from timeit import timeit
print("Without Numpy" )
print(
timeit(
"euclidean_distance_no_np([1, 2, 3], [4, 5, 6])" , number=1_00_00 , globals=globals() , ) )
print("With Numpy" )
print(
timeit(
"euclidean_distance([1, 2, 3], [4, 5, 6])" , number=1_00_00 , globals=globals() , ) )
benchmark()
| 716
|
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
| 292
| 0
|
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
"""simple docstring"""
    image_processing_class = DetrImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
_lowerCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
_lowerCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_lowerCAmelCase , _lowerCAmelCase : List[str] = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCAmelCase , _lowerCAmelCase : Any = self.image_processor_tester.get_expected_values(snake_case__ , batched=snake_case__ )
_lowerCAmelCase : Tuple = image_processing(snake_case__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_numpy(self):
_lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
# Test not batched input
_lowerCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_lowerCAmelCase , _lowerCAmelCase : str = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCAmelCase : Dict = image_processing(snake_case__ , return_tensors='pt' ).pixel_values
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(snake_case__ , batched=snake_case__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_pytorch(self):
_lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
_lowerCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_lowerCAmelCase , _lowerCAmelCase : int = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCAmelCase : Any = image_processing(snake_case__ , return_tensors='pt' ).pixel_values
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.image_processor_tester.get_expected_values(snake_case__ , batched=snake_case__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
_lowerCAmelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
_lowerCAmelCase : Optional[Any] = json.loads(f.read() )
_lowerCAmelCase : Any = {'image_id': 3_9769, 'annotations': target}
# encode them
_lowerCAmelCase : Any = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
_lowerCAmelCase : Optional[Any] = image_processing(images=snake_case__ , annotations=snake_case__ , return_tensors='pt' )
# verify pixel values
_lowerCAmelCase : Optional[Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , snake_case__ )
_lowerCAmelCase : str = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , snake_case__ , atol=1E-4 ) )
# verify area
_lowerCAmelCase : int = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , snake_case__ ) )
# verify boxes
_lowerCAmelCase : Any = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , snake_case__ )
_lowerCAmelCase : Tuple = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , snake_case__ , atol=1E-3 ) )
# verify image_id
_lowerCAmelCase : List[Any] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , snake_case__ ) )
# verify is_crowd
_lowerCAmelCase : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , snake_case__ ) )
# verify class_labels
_lowerCAmelCase : Dict = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , snake_case__ ) )
# verify orig_size
_lowerCAmelCase : Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , snake_case__ ) )
# verify size
_lowerCAmelCase : Optional[int] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , snake_case__ ) )
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
_lowerCAmelCase : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
_lowerCAmelCase : Optional[int] = json.loads(f.read() )
_lowerCAmelCase : Dict = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
_lowerCAmelCase : Dict = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
_lowerCAmelCase : List[str] = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
_lowerCAmelCase : Optional[int] = image_processing(images=snake_case__ , annotations=snake_case__ , masks_path=snake_case__ , return_tensors='pt' )
# verify pixel values
_lowerCAmelCase : Any = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , snake_case__ )
_lowerCAmelCase : int = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , snake_case__ , atol=1E-4 ) )
# verify area
_lowerCAmelCase : Union[str, Any] = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , snake_case__ ) )
# verify boxes
_lowerCAmelCase : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , snake_case__ )
_lowerCAmelCase : List[str] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , snake_case__ , atol=1E-3 ) )
# verify image_id
_lowerCAmelCase : Optional[Any] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , snake_case__ ) )
# verify is_crowd
_lowerCAmelCase : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , snake_case__ ) )
# verify class_labels
_lowerCAmelCase : List[str] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , snake_case__ ) )
# verify masks
_lowerCAmelCase : List[Any] = 82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , snake_case__ )
# verify orig_size
_lowerCAmelCase : List[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , snake_case__ ) )
# verify size
_lowerCAmelCase : Union[str, Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , snake_case__ ) )
| 444
|
'''simple docstring'''
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
"""simple docstring"""
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus
    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
"""num""",
type=int,
help="""The value to find a divisor of""",
)
parser.add_argument(
"""--attempts""",
type=int,
default=3,
help="""The number of attempts before giving up""",
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F'''{args.num} is probably prime''')
else:
        quotient = args.num // divisor
print(F'''{args.num} = {divisor} * {quotient}''')
| 444
| 1
|
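# A hedged usage sketch for pollard_rho above: factoring a small semiprime. The
# algorithm is probabilistic, so a nontrivial factor is likely but not
# guaranteed within the default three attempts.
factor = pollard_rho(10403)  # 10403 == 101 * 103
assert factor in (101, 103, None)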
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    # Convert angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)

    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
| 345
|
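# A hedged usage sketch for apparent_power above: the function multiplies the
# two phasors directly, so the magnitude of the result is |V| * |I| and its
# angle is the sum of the two input angles (given in degrees).
s = apparent_power(100, 5, 0, 45)
assert abs(abs(s) - 500) < 1e-9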
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
        # fmt: on
        emoji_tokens = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}}  # 😀
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['emoji_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        with open(self.emoji_file, 'w') as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
        output_text = 'こんにちは、世界。 \nこんばんは、世界。😀'
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def __lowerCamelCase ( self ):
pass # TODO add if relevant
def __lowerCamelCase ( self ):
pass # TODO add if relevant
def __lowerCamelCase ( self ):
pass # TODO add if relevant
def __lowerCamelCase ( self ):
__snake_case = self.get_tokenizer()
# Testing tokenization
__snake_case = 'こんにちは、世界。 こんばんは、㔺界。'
__snake_case = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
__snake_case = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Testing conversion to ids without special tokens
__snake_case = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
__snake_case = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Testing conversion to ids with special tokens
__snake_case = tokens + [tokenizer.unk_token]
__snake_case = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
__snake_case = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __lowerCamelCase ( self ):
__snake_case = self.get_tokenizer()
# Testing tokenization
__snake_case = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
__snake_case = 'こんにちは、、、、世界。こんばんは、、、、世界。'
__snake_case = tokenizer.encode(SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def __lowerCamelCase ( self ):
__snake_case = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
__snake_case = 'こんにちは、世界。'
__snake_case = 'こんばんは、㔺界。😀'
__snake_case = 'こんにちは、世界。こんばんは、世界。😀'
__snake_case = tokenizer.encode(prefix_text + input_text )
__snake_case = tokenizer.encode('' , prefix_text=prefix_text + input_text )
__snake_case = tokenizer.encode(SCREAMING_SNAKE_CASE_ , prefix_text=SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def __lowerCamelCase ( self ):
__snake_case = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
__snake_case = 'こんにちは、世界。'
__snake_case = 'こんばんは、㔺界。😀'
__snake_case = len(tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) - 2
__snake_case = len(tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) - 2
__snake_case = [1] + [0] * (len_prefix + len_text + 1)
__snake_case = [1] * (len_prefix + len_text + 1) + [0]
__snake_case = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
__snake_case = tokenizer(prefix_text + input_text ).token_type_ids
__snake_case = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
__snake_case = tokenizer(SCREAMING_SNAKE_CASE_ , prefix_text=SCREAMING_SNAKE_CASE_ ).token_type_ids
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def __lowerCamelCase ( self ):
__snake_case = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
__snake_case = tokenizer.encode('あンいワ' )
__snake_case = tokenizer.encode('' , prefix_text='あンいワ' )
__snake_case = tokenizer.encode('いワ' , prefix_text='あン' )
self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE_ ) , tokenizer.decode(SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE_ ) , tokenizer.decode(SCREAMING_SNAKE_CASE_ ) )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def __lowerCamelCase ( self ):
__snake_case = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
__snake_case = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
__snake_case = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.batch_encode_plus(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ )
# fmt: off
__snake_case = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
__snake_case = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
__snake_case = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token.token_type_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token.attention_mask , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token_a.input_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token_a.token_type_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token_a.attention_mask , SCREAMING_SNAKE_CASE_ )
def __lowerCamelCase ( self ):
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def __lowerCamelCase ( self ):
# tokenizer has no padding token
pass
| 345
| 1
|
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 235
|
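# A hedged cross-check for the counting recurrence above: directly enumerating
# all rows of small lengths (red runs of at least 3 cells, any number of black
# cells between them) should agree with the dynamic-programming table.
from itertools import groupby, product


def brute_force(length: int) -> int:
    count = 0
    for row in product((0, 1), repeat=length):
        # every maximal run of red cells (1s) must be at least 3 long
        if all(key == 0 or len(list(group)) >= 3 for key, group in groupby(row)):
            count += 1
    return count


assert all(brute_force(n) == solution(n) for n in range(3, 12))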
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
    check_all_inits()
    check_submodules()
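# For intuition, a simplified stand-in for the regex-driven line classification that
# parse_init performs; the pattern below is an illustrative assumption, not the module's
# actual regex.
if __name__ == "__main__":
    import re

    _re_import_demo = re.compile(r"^\s*from\s+\S+\s+import\s+(.+)$")
    demo_line = "    from .configuration_bert import BertConfig, BertOnnxConfig"
    demo_match = _re_import_demo.search(demo_line)
    if demo_match is not None:
        print(demo_match.groups()[0].split(", "))  # ['BertConfig', 'BertOnnxConfig']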
| 235
| 1
|
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extractor specific tests
    feat_extract_tester = None
    feature_extraction_class = None
@property
    def feat_extract_dict(self):
return self.feat_extract_tester.prepare_feat_extract_dict()
    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))
    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
@require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
@require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size
        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features, padding=False)
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="longest")
        input_2 = input_2[input_name]

        input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")
        input_4 = input_4[input_name]

        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length")[input_name]

        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np"
        )
        input_5 = input_5[input_name]

        self.assertFalse(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(_inputs_are_equal(input_2, input_3))
        self.assertTrue(len(input_1[0]) == pad_min_length)
        self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0])))
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))

        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)
        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)
        input_7 = input_7[input_name]

        input_8 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            pad_to_multiple_of=10,
            max_length=pad_max_length,
            return_tensors="np",
        )
        input_9 = input_9[input_name]

        self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
        self.assertTrue(_inputs_are_equal(input_6, input_7))

        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))

        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size)
        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_2[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3
        )
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3
        )
    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})
        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
        )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
        input_2 = input_2[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to smallest with np
        input_3 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            return_tensors="np",
            truncation=True,
        )
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
        )
        input_4 = input_4[input_name]

        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(input_3.shape[1] == len(speech_inputs[0]))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_4))

        # truncate to middle
        input_5 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[1]),
            truncation=True,
            return_tensors="np",
        )
        input_5 = input_5[input_name]

        input_6 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
        )
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
        )
        input_7 = input_7[input_name]

        self.assertTrue(input_5.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_have_equal_length(input_6))
        self.assertTrue(_inputs_are_equal(input_5, input_6))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_7))
        self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_8 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
            truncation=True,
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
        )
        input_9 = input_9[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_8[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_8))
        self.assertFalse(_inputs_have_equal_length(input_9))
    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)
@require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
@require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
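    # A self-contained sketch of the padding behavior exercised above; the concrete feature
    # extractor is an assumption (Wav2Vec2FeatureExtractor follows this API):
    #
    #   from transformers import BatchFeature, Wav2Vec2FeatureExtractor
    #
    #   feat_extract = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
    #   speech_inputs = [[0.1] * 800, [0.1] * 1000, [0.1] * 1200]
    #   batch = BatchFeature({"input_values": speech_inputs})
    #   padded = feat_extract.pad(batch, padding="longest", return_tensors="np")
    #   padded["input_values"].shape  # (3, 1200)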
| 226
|
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """Solve Coulomb's law for whichever one of the four quantities is given as 0."""
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
    import doctest

    doctest.testmod()
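    # A quick usage sketch of the solver above (function name as in the cleaned-up
    # version; the numbers are illustrative):
    print(coulombs_law(force=0, charge1=3, charge2=5, distance=2000))
    # {'force': 33705.0}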
| 226
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length
if __name__ == "__main__":
    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100_000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
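    # Sanity check for the cleaned-up line_length above: the straight line y = x from 0 to 1
    # has length hypot(1, 1) = sqrt(2), which a single linear segment captures exactly.
    assert abs(line_length(lambda x: x, 0, 1, steps=1) - math.hypot(1, 1)) < 1e-12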
| 564
|
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the last completed workflow run id of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI workflow run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the contents of the requested artifacts of the last completed daily CI workflow run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
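# A minimal usage sketch of the helpers above (requires network access and a GitHub token
# with read permissions; the artifact name below is purely illustrative):
if __name__ == "__main__":
    reports = get_last_daily_ci_reports(
        artifact_names=["run_all_tests_gpu_test_reports"],
        output_dir="ci_artifacts",
        token=os.environ.get("GITHUB_TOKEN"),
    )
    for artifact, files in reports.items():
        print(artifact, sorted(files)[:3])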
| 564
| 1
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization. "
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization. "
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )
def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
    main()
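# The same composition API can be used interactively; a minimal sketch (checkpoint names
# are illustrative, and this downloads both models when run):
#
#   model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
#       "google/vit-base-patch16-224-in21k", "gpt2"
#   )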
| 9
|
'''simple docstring'''
from manim import *
class UpperCamelCase__ (a ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
lowerCamelCase__ = Rectangle(height=0.5 ,width=0.5 )
lowerCamelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = VGroup(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = Text("""CPU""" ,font_size=24 )
lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowerCAmelCase )
lowerCamelCase__ = [mem.copy() for i in range(1 )]
lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = Text("""GPU""" ,font_size=24 )
lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase )
gpu.align_to(_lowerCAmelCase ,_lowerCAmelCase )
gpu.set_x(gpu.get_x() - 1 )
self.add(_lowerCAmelCase )
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = Text("""Model""" ,font_size=24 )
lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.play(
Create(_lowerCAmelCase ,run_time=1 ) ,Create(_lowerCAmelCase ,run_time=1 ) ,Create(_lowerCAmelCase ,run_time=1 ) ,)
lowerCamelCase__ = MarkupText(
F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' ,font_size=24 ,)
lowerCamelCase__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase__ = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCAmelCase ,run_time=2.5 ) ,Write(_lowerCAmelCase ) ,Write(_lowerCAmelCase ) )
self.add(_lowerCAmelCase )
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = []
for i, rect in enumerate(_lowerCAmelCase ):
lowerCamelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowerCAmelCase ,opacity=0.7 )
cpu_target.move_to(_lowerCAmelCase )
cpu_target.generate_target()
lowerCamelCase__ = 0.46 / 4
lowerCamelCase__ = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=_lowerCAmelCase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target ,direction=_lowerCAmelCase ,buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target ,direction=_lowerCAmelCase ,buff=0.0 )
cpu_targs.append(_lowerCAmelCase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_lowerCAmelCase ) )
second_animations.append(MoveToTarget(_lowerCAmelCase ,run_time=1.5 ) )
self.play(*_lowerCAmelCase )
self.play(*_lowerCAmelCase )
self.wait()
| 9
| 1
|
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Converts the given string of 32 hexadecimal characters to little-endian word order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Converts the given non-negative integer to hex, reordered as little-endian bytes."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Converts the message to a bit string, pads it, and appends the message length."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Splits the bit string into 512-bit blocks of sixteen 32-bit little-endian words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Flips every bit of the given 32-bit integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Adds two numbers modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotates the bits of the given 32-bit integer left by the given amount."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Returns the 32-char MD5 hex digest of the given message, as bytes."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x6745_2301
    b0 = 0xEFCD_AB89
    c0 = 0x98BA_DCFE
    d0 = 0x1032_5476
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
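    # Cross-check of the cleaned-up implementation above against the standard library
    # (md5_me returns the hex digest as bytes, matching hashlib's hexdigest):
    import hashlib

    msg = b"The quick brown fox jumps over the lazy dog"
    assert md5_me(msg) == hashlib.md5(msg).hexdigest().encode("utf-8")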
| 666
|
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self) -> None:
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self) -> None:
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)
    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
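    # A minimal end-to-end sketch of the processor under test (checkpoint and preset names
    # come from the fixtures above; running this downloads the model assets):
    #
    #   processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
    #   inputs = processor(text="This is a test string", voice_preset="en_speaker_1")
    #   inputs["input_ids"].shape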
| 447
| 0
|
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials, direction, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"
class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)
class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)
class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)
class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
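# Quick sketch of the resolution helper above; the outcome depends on which optional
# backends happen to be installed in the environment.
if __name__ == "__main__":
    try:
        print(default_hp_search_backend())  # e.g. "optuna"
    except RuntimeError as err:
        print(err)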
| 719
|
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
_a : Dict = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    """Compute the latent grid size for a requested pixel size: ceil(size / scale_factor**2) * scale_factor."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
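# Worked example for the helper above: with the default scale factor of 8, a requested
# pixel size maps onto the 8x-downscaled latent grid (values are illustrative).
if __name__ == "__main__":
    print(downscale_height_and_width(768, 768))  # (96, 96)
    print(downscale_height_and_width(770, 770))  # (104, 104)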
class a_ ( a ):
def __init__( self : Optional[int] , UpperCAmelCase__ : UNetaDConditionModel , UpperCAmelCase__ : DDPMScheduler , UpperCAmelCase__ : VQModel , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , movq=UpperCAmelCase__ , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
def lowerCAmelCase( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any ):
"""simple docstring"""
if latents is None:
snake_case : int = randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=UpperCAmelCase__ , dtype=UpperCAmelCase__ )
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
snake_case : Optional[Any] = latents.to(UpperCAmelCase__ )
snake_case : List[Any] = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase( self : Dict , UpperCAmelCase__ : Optional[int]=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
snake_case : Union[str, Any] = torch.device(F"cuda:{gpu_id}" )
snake_case : Dict = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCAmelCase( self : List[Any] , UpperCAmelCase__ : Any=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
snake_case : Optional[int] = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=UpperCAmelCase__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
snake_case : List[str] = None
for cpu_offloaded_model in [self.unet, self.movq]:
snake_case , snake_case : Optional[int] = cpu_offload_with_hook(UpperCAmelCase__ , UpperCAmelCase__ , prev_module_hook=UpperCAmelCase__ )
# We'll offload the last model manually.
snake_case : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase( self : Union[str, Any] ):
"""simple docstring"""
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCAmelCase__ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(UpperCAmelCase__ )
def __call__( self : List[str] , UpperCAmelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase__ : torch.FloatTensor , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 100 , UpperCAmelCase__ : float = 4.0 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ):
"""simple docstring"""
snake_case : Optional[int] = self._execution_device
snake_case : Union[str, Any] = guidance_scale > 1.0
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
snake_case : Any = torch.cat(UpperCAmelCase__ , dim=0 )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
snake_case : Union[str, Any] = torch.cat(UpperCAmelCase__ , dim=0 )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
snake_case : int = torch.cat(UpperCAmelCase__ , dim=0 )
snake_case : List[Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
snake_case : Dict = image_embeds.repeat_interleave(UpperCAmelCase__ , dim=0 )
snake_case : Optional[Any] = negative_image_embeds.repeat_interleave(UpperCAmelCase__ , dim=0 )
snake_case : Tuple = hint.repeat_interleave(UpperCAmelCase__ , dim=0 )
snake_case : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase__ )
snake_case : List[str] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase__ )
self.scheduler.set_timesteps(UpperCAmelCase__ , device=UpperCAmelCase__ )
snake_case : str = self.scheduler.timesteps
snake_case : Optional[Any] = self.movq.config.latent_channels
snake_case , snake_case : Optional[Any] = downscale_height_and_width(UpperCAmelCase__ , UpperCAmelCase__ , self.movq_scale_factor )
# create initial latent
snake_case : Dict = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(UpperCAmelCase__ ) ):
# expand the latents if we are doing classifier free guidance
snake_case : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case : Optional[int] = {'''image_embeds''': image_embeds, '''hint''': hint}
snake_case : Any = self.unet(
sample=UpperCAmelCase__ , timestep=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , added_cond_kwargs=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , )[0]
if do_classifier_free_guidance:
snake_case , snake_case : Dict = noise_pred.split(latents.shape[1] , dim=1 )
snake_case , snake_case : Any = noise_pred.chunk(2 )
snake_case , snake_case : Dict = variance_pred.chunk(2 )
snake_case : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
snake_case : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
snake_case , snake_case : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
snake_case : List[Any] = self.scheduler.step(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ , )[0]
# post-processing
snake_case : List[Any] = self.movq.decode(UpperCAmelCase__ , force_not_quantize=UpperCAmelCase__ )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
snake_case : Optional[Any] = image * 0.5 + 0.5
snake_case : int = image.clamp(0 , 1 )
snake_case : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
snake_case : str = self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase__ )
| 84
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30,
                 max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False,
                 do_pad=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = DonutImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
_lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A,"do_resize" ) )
self.assertTrue(hasattr(__A,"size" ) )
self.assertTrue(hasattr(__A,"do_thumbnail" ) )
self.assertTrue(hasattr(__A,"do_align_long_axis" ) )
self.assertTrue(hasattr(__A,"do_pad" ) )
self.assertTrue(hasattr(__A,"do_normalize" ) )
self.assertTrue(hasattr(__A,"image_mean" ) )
self.assertTrue(hasattr(__A,"image_std" ) )
    def test_image_processor_from_dict_with_kwargs(self):
_lowerCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size,{"height": 1_8, "width": 2_0} )
_lowerCamelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict,size=4_2 )
self.assertEqual(image_processor.size,{"height": 4_2, "width": 4_2} )
# Previous config had dimensions in (width, height) order
_lowerCamelCase : Any = self.image_processing_class.from_dict(self.image_processor_dict,size=(4_2, 8_4) )
self.assertEqual(image_processor.size,{"height": 8_4, "width": 4_2} )
    def test_batch_feature(self):
pass
@is_flaky()
    def test_call_pil(self):
# Initialize image_processing
_lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester,equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A,Image.Image )
# Test not batched input
_lowerCamelCase : Union[str, Any] = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
),)
# Test batched
_lowerCamelCase : int = image_processing(__A,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
),)
@is_flaky()
    def test_call_numpy(self):
# Initialize image_processing
_lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester,equal_resolution=__A,numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A,np.ndarray )
# Test not batched input
_lowerCamelCase : List[Any] = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
),)
# Test batched
_lowerCamelCase : List[Any] = image_processing(__A,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
),)
@is_flaky()
    def test_call_pytorch(self):
# Initialize image_processing
_lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester,equal_resolution=__A,torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A,torch.Tensor )
# Test not batched input
_lowerCamelCase : Tuple = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
),)
# Test batched
_lowerCamelCase : int = image_processing(__A,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
),)
| 44
|
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
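# Editor's sketch (an addition, not part of the original test suite): the dummy
# inputs above encode the stage-2 contract of this pipeline -- a 16x16 low-resolution
# `image` is refined against a 32x32 `original_image`/`mask_image` pair, i.e. a
# fixed 2x spatial ratio. Reproduced here with the same helpers this file imports.
_demo_image = floats_tensor((1, 3, 16, 16), rng=random.Random(0))
_demo_original = floats_tensor((1, 3, 32, 32), rng=random.Random(0))
_demo_mask = floats_tensor((1, 3, 32, 32), rng=random.Random(0))
assert _demo_original.shape[-1] == 2 * _demo_image.shape[-1]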
| 654
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class lowercase__ ( _UpperCAmelCase ):
    model_type = """data2vec-text"""
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class lowercase__ ( _UpperCAmelCase ):
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
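# Editor's sketch (assumptions: the upstream class names Data2VecTextConfig and
# Data2VecTextOnnxConfig, which this obfuscated copy renames, and their import
# paths in `transformers`). It shows what the `inputs` property above yields per task.
from transformers import Data2VecTextConfig
from transformers.models.data2vec.configuration_data2vec_text import Data2VecTextOnnxConfig

_cfg = Data2VecTextConfig()
_onnx_cfg = Data2VecTextOnnxConfig(_cfg, task="multiple-choice")
# -> OrderedDict([('input_ids', {0: 'batch', 1: 'choice', 2: 'sequence'}),
#                 ('attention_mask', {0: 'batch', 1: 'choice', 2: 'sequence'})])
print(_onnx_cfg.inputs)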
| 400
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class lowercase__ ( _UpperCAmelCase ):
    model_type = """mvp"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__(self, vocab_size=50267, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, use_prompt=False, prompt_length=100, prompt_mid_dim=800, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
        if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated', False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
                'The config can simply be saved and uploaded again to be fixed.' )
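# Editor's sketch (assumption: the upstream class name MvpConfig for the class
# defined above). The compatibility branch promotes the legacy
# `force_bos_token_to_be_generated` kwarg into `forced_bos_token_id`:
from transformers import MvpConfig

_legacy_cfg = MvpConfig(force_bos_token_to_be_generated=True)
assert _legacy_cfg.forced_bos_token_id == _legacy_cfg.bos_token_id  # set by the shim, with a warning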
| 400
| 1
|
B64_CHARSET = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''


def base64_encode(data: bytes) -> bytes:
    '''Encodes `data` according to RFC 4648.'''
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    '''Decodes `encoded_data` according to RFC 4648.'''
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )
    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(data)
if __name__ == "__main__":
import doctest
doctest.testmod()
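# Editor's addition: a quick round-trip check of the two functions above against the
# standard library, which implements the same RFC 4648 alphabet.
import base64 as _stdlib_base64

_sample = b"Hello, World!"
assert base64_encode(_sample) == _stdlib_base64.b64encode(_sample)
assert base64_decode(base64_encode(_sample)) == _sample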
| 663
|
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCamelCase = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
UpperCamelCase = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]
    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])
    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)
    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)
    if strict and len(incompatible_ops) > 0:
        raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"""Found the following incompatible ops for the opset {opset}:""")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
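# Editor's sketch: the checker can also be driven from Python instead of the CLI.
# The path below is hypothetical; with strict=False incompatible ops are only
# printed, while strict=True raises an exception instead.
#
#     onnx_compliancy("./model/saved_model.pb", strict=False, opset=12)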
| 45
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
lowercase_ = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
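# Editor's sketch (assumption: `transformers` is installed): _LazyModule defers the
# heavy torch-backed import until an attribute is first accessed.
import importlib

_trocr = importlib.import_module("transformers.models.trocr")
_processor_cls = _trocr.TrOCRProcessor  # the real submodule is only imported here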
| 215
|
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case :
'''simple docstring'''
def __init__( self : Optional[int], _lowerCamelCase : Dict, _lowerCamelCase : Optional[Any]=13, _lowerCamelCase : str=32, _lowerCamelCase : str=2, _lowerCamelCase : str=3, _lowerCamelCase : Optional[Any]=16, _lowerCamelCase : int=[32, 64, 1_28], _lowerCamelCase : Dict=[1, 2, 1], _lowerCamelCase : Union[str, Any]=[2, 2, 4], _lowerCamelCase : Any=2, _lowerCamelCase : List[str]=2.0, _lowerCamelCase : Union[str, Any]=True, _lowerCamelCase : Dict=0.0, _lowerCamelCase : Optional[int]=0.0, _lowerCamelCase : str=0.1, _lowerCamelCase : Optional[int]="gelu", _lowerCamelCase : int=False, _lowerCamelCase : Optional[int]=True, _lowerCamelCase : str=0.02, _lowerCamelCase : Tuple=1e-5, _lowerCamelCase : List[str]=True, _lowerCamelCase : List[str]=None, _lowerCamelCase : Optional[int]=True, _lowerCamelCase : Optional[Any]=10, _lowerCamelCase : str=8, _lowerCamelCase : Optional[int]=["stage1", "stage2"], _lowerCamelCase : Optional[int]=[1, 2], ):
'''simple docstring'''
__A = parent
__A = batch_size
__A = image_size
__A = patch_size
__A = num_channels
__A = embed_dim
__A = hidden_sizes
__A = depths
__A = num_heads
__A = window_size
__A = mlp_ratio
__A = qkv_bias
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = drop_path_rate
__A = hidden_act
__A = use_absolute_embeddings
__A = patch_norm
__A = layer_norm_eps
__A = initializer_range
__A = is_training
__A = scope
__A = use_labels
__A = type_sequence_label_size
__A = encoder_stride
__A = out_features
__A = out_indices
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
'''simple docstring'''
__A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size], self.type_sequence_label_size )
__A = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, hidden_sizes=self.hidden_sizes, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : List[Any], _lowerCamelCase : Any, _lowerCamelCase : int ):
'''simple docstring'''
__A = FocalNetModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__A = model(_lowerCamelCase )
__A = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__A = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim) )
def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : str, _lowerCamelCase : List[Any], _lowerCamelCase : List[Any] ):
'''simple docstring'''
__A = FocalNetBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__A = model(_lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__A = None
__A = FocalNetBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__A = model(_lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ), 1 )
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )
def _SCREAMING_SNAKE_CASE ( self : Tuple, _lowerCamelCase : List[str], _lowerCamelCase : int, _lowerCamelCase : Optional[int] ):
'''simple docstring'''
__A = FocalNetForMaskedImageModeling(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__A = model(_lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__A = 1
__A = FocalNetForMaskedImageModeling(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__A = model(_lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size) )
def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : Optional[int], _lowerCamelCase : str, _lowerCamelCase : List[Any] ):
'''simple docstring'''
__A = self.type_sequence_label_size
__A = FocalNetForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__A = model(_lowerCamelCase, labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__A = 1
__A = FocalNetForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__A = model(_lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
__A = self.prepare_config_and_inputs()
__A , __A , __A = config_and_inputs
__A = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class snake_case ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
A_ : int = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
A_ : Dict = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
A_ : Union[str, Any] = False
A_ : Optional[int] = False
A_ : Union[str, Any] = False
A_ : List[Any] = False
A_ : Optional[int] = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
__A = FocalNetModelTester(self )
__A = ConfigTester(self, config_class=_lowerCamelCase, embed_dim=37, has_text_modality=_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Any ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
'''simple docstring'''
return
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : str ):
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@unittest.skip(reason='''FocalNet does not use inputs_embeds''' )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason='''FocalNet does not use feedforward chunking''' )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : Any ):
'''simple docstring'''
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__A = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
__A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase, nn.Linear ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
'''simple docstring'''
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__A = model_class(_lowerCamelCase )
__A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A = [*signature.parameters.keys()]
__A = ['''pixel_values''']
self.assertListEqual(arg_names[:1], _lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Any, _lowerCamelCase : Any, _lowerCamelCase : List[Any], _lowerCamelCase : List[Any] ):
'''simple docstring'''
__A = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
__A = model(**self._prepare_for_class(_lowerCamelCase, _lowerCamelCase ) )
__A = outputs.hidden_states
__A = getattr(
self.model_tester, '''expected_num_hidden_layers''', len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_lowerCamelCase ), _lowerCamelCase )
# FocalNet has a different seq_length
__A = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [num_patches, self.model_tester.embed_dim], )
__A = outputs.reshaped_hidden_states
self.assertEqual(len(_lowerCamelCase ), _lowerCamelCase )
__A , __A , __A , __A = reshaped_hidden_states[0].shape
__A = (
reshaped_hidden_states[0].view(_lowerCamelCase, _lowerCamelCase, height * width ).permute(0, 2, 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ), [num_patches, self.model_tester.embed_dim], )
def _SCREAMING_SNAKE_CASE ( self : int ):
'''simple docstring'''
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
__A = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
__A = True
self.check_hidden_states_output(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A = True
self.check_hidden_states_output(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
__A = 3
__A = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__A = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__A = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__A = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
__A = True
self.check_hidden_states_output(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A = True
self.check_hidden_states_output(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, (padded_height, padded_width) )
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = FocalNetModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
'''simple docstring'''
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
__A = _config_zero_init(_lowerCamelCase )
for model_class in self.all_model_classes:
__A = model_class(config=_lowerCamelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f'Parameter {name} of model {model_class} seems not properly initialized', )
@require_vision
@require_torch
class snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : int ):
'''simple docstring'''
# TODO update organization
return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''' ) if is_vision_available() else None
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
__A = FocalNetForImageClassification.from_pretrained('''microsoft/focalnet-tiny''' ).to(_lowerCamelCase )
__A = self.default_image_processor
__A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__A = image_processor(images=_lowerCamelCase, return_tensors='''pt''' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
__A = model(**_lowerCamelCase )
# verify the logits
__A = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape, _lowerCamelCase )
__A = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3], _lowerCamelCase, atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item(), 2_81 )
@require_torch
class snake_case ( _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[Any] = (FocalNetBackbone,) if is_torch_available() else ()
A_ : str = FocalNetConfig
A_ : str = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
__A = FocalNetModelTester(self )
| 215
| 1
|
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any=13 , UpperCamelCase__ : List[str]=64 , UpperCamelCase__ : Any=2 , UpperCamelCase__ : Tuple=3 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[Any]=32 , UpperCamelCase__ : int=5 , UpperCamelCase__ : int=4 , UpperCamelCase__ : Optional[Any]=37 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Optional[Any]=10 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : Union[str, Any]=[1, 16, 4, 4] , UpperCamelCase__ : int=None , ):
A = parent
A = batch_size
A = image_size
A = patch_size
A = num_channels
A = is_training
A = use_labels
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = type_sequence_label_size
A = initializer_range
A = scope
A = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
A = (self.image_size // 32) ** 2
A = num_patches + 1
def UpperCamelCase ( self : List[Any] ):
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self : Tuple ):
A = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=UpperCamelCase__ , )
def UpperCamelCase ( self : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
A = ViTHybridModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ):
A = self.type_sequence_label_size
A = ViTHybridForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase ( self : List[str] ):
A = self.prepare_config_and_inputs()
A , A , A = config_and_inputs
A = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : List[str] = (
{'''feature-extraction''': ViTHybridModel, '''image-classification''': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : List[Any] = False
def UpperCamelCase ( self : Optional[Any] ):
A = ViTHybridModelTester(self )
A = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def UpperCamelCase ( self : Dict ):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def UpperCamelCase ( self : Any ):
pass
def UpperCamelCase ( self : List[str] ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def UpperCamelCase ( self : Optional[int] ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(UpperCamelCase__ )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def UpperCamelCase ( self : str ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def UpperCamelCase ( self : Union[str, Any] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
def UpperCamelCase ( self : str ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = _config_zero_init(UpperCamelCase__ )
for model_class in self.all_model_classes:
A = model_class(config=UpperCamelCase__ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
A = [f'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def UpperCamelCase ( self : Optional[int] ):
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = ViTHybridModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def __UpperCamelCase () -> Optional[Any]:
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase ( self : Optional[int] ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase ( self : Any ):
A = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
UpperCamelCase__ )
A = self.default_image_processor
A = prepare_img()
A = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
A = model(**UpperCamelCase__ )
# verify the logits
A = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
A = torch.tensor([-1.9_090, -0.4_993, -0.2_389] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
@require_accelerate
def UpperCamelCase ( self : int ):
A = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
A = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
A = prepare_img()
A = image_processor(images=UpperCamelCase__ , return_tensors='pt' )
A = model(**UpperCamelCase__ )
A = outputs.logits
# model predicts one of the 1000 ImageNet classes
A = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , 'tabby, tabby cat' )
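# Editor's note: the sequence length this tester asserts follows from the comment in
# __init__ above -- the hybrid backbone downsamples by 32, then a [CLS] token is
# prepended. A tiny standalone check of that arithmetic for the 64x64 test images:
_image_size = 64
_num_patches = (_image_size // 32) ** 2  # 2 * 2 = 4 spatial tokens
assert _num_patches + 1 == 5             # +1 for the [CLS] token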
| 699
|
def max_product_subarray(numbers: list[int]) -> int:
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers):
        raise ValueError('numbers must be an iterable of integers')
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
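# Editor's addition: tracking both the running maximum and minimum is what lets a
# negative factor flip a small product into the new maximum.
assert max_product_subarray([2, 3, -2, 4]) == 6    # best subarray: [2, 3]
assert max_product_subarray([-2, 0, -1]) == 0
assert max_product_subarray([-2, -3, 4]) == 24     # best subarray: [-2, -3, 4]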
| 699
| 1
|
def solution(n: int = 10) -> str:
    if not isinstance(n, int) or n < 0:
        raise ValueError('Invalid input')
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(10) = }""")
| 721
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
_snake_case : Union[str, Any] = False
@skip_mps
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = StableDiffusionAttendAndExcitePipeline
a_ = False
a_ = TEXT_TO_IMAGE_PARAMS
a_ = TEXT_TO_IMAGE_BATCH_PARAMS.union({"""token_indices"""} )
a_ = TEXT_TO_IMAGE_IMAGE_PARAMS
a_ = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def lowercase ( cls : Optional[Any] ) -> Optional[int]:
super().setUpClass()
torch.use_deterministic_algorithms(lowerCAmelCase_ )
@classmethod
def lowercase ( cls : List[str] ) -> Union[str, Any]:
super().tearDownClass()
torch.use_deterministic_algorithms(lowerCAmelCase_ )
def lowercase ( self : List[str] ) -> Dict:
torch.manual_seed(0 )
        __lowerCAmelCase = UNet2DConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowerCAmelCase_ , )
__lowerCAmelCase = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=lowerCAmelCase_ , set_alpha_to_one=lowerCAmelCase_ , )
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , )
__lowerCAmelCase = CLIPTextModel(lowerCAmelCase_ )
__lowerCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__lowerCAmelCase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowercase ( self : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str=0 ) -> Tuple:
if str(lowerCAmelCase_ ).startswith('mps' ):
__lowerCAmelCase = torch.manual_seed(lowerCAmelCase_ )
else:
__lowerCAmelCase = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
__lowerCAmelCase = __lowerCAmelCase = {
'prompt': 'a cat and a frog',
'token_indices': [2, 5],
'generator': generator,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
'max_iter_to_alter': 2,
'thresholds': {0: 0.7},
}
return inputs
def lowercase ( self : str ) -> Any:
__lowerCAmelCase = 'cpu'
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = self.get_dummy_inputs(lowerCAmelCase_ )
__lowerCAmelCase = pipe(**lowerCAmelCase_ ).images
__lowerCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 6_4, 6_4, 3) )
__lowerCAmelCase = np.array(
[0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] )
__lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase_ , 1e-3 )
def lowercase ( self : Tuple ) -> Optional[int]:
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def lowercase ( self : Union[str, Any] ) -> List[str]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase ( self : List[Any] ) -> Any:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def lowercase ( self : Any ) -> Any:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def lowercase ( self : Tuple ) -> str:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def lowercase ( self : Optional[int] ) -> Any:
super().test_save_load_local(expected_max_difference=5e-4 )
def lowercase ( self : Optional[Any] ) -> str:
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def lowercase ( cls : List[Any] ) -> List[Any]:
super().setUpClass()
torch.use_deterministic_algorithms(lowerCAmelCase_ )
@classmethod
def lowercase ( cls : Tuple ) -> Any:
super().tearDownClass()
torch.use_deterministic_algorithms(lowerCAmelCase_ )
def lowercase ( self : Union[str, Any] ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self : Tuple ) -> List[Any]:
__lowerCAmelCase = torch.manual_seed(5_1 )
__lowerCAmelCase = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , safety_checker=lowerCAmelCase_ , torch_dtype=torch.float16 )
pipe.to('cuda' )
__lowerCAmelCase = 'a painting of an elephant with glasses'
__lowerCAmelCase = [5, 7]
__lowerCAmelCase = pipe(
prompt=lowerCAmelCase_ , token_indices=lowerCAmelCase_ , guidance_scale=7.5 , generator=lowerCAmelCase_ , num_inference_steps=5 , max_iter_to_alter=5 , output_type='numpy' , ).images[0]
__lowerCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' )
assert np.abs((expected_image - image).max() ) < 5e-1
| 421
| 0
|
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__ ( __A ):
def A ( self ) -> Dict:
"""simple docstring"""
a_ : Dict = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCamelCase_ , """embed_dim""" ) )
self.parent.assertTrue(hasattr(UpperCamelCase_ , """num_heads""" ) )
class snake_case__ :
def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=64 , UpperCamelCase_=3 , UpperCamelCase_=[16, 48, 96] , UpperCamelCase_=[1, 3, 6] , UpperCamelCase_=[1, 2, 10] , UpperCamelCase_=[7, 3, 3] , UpperCamelCase_=[4, 2, 2] , UpperCamelCase_=[2, 1, 1] , UpperCamelCase_=[2, 2, 2] , UpperCamelCase_=[False, False, True] , UpperCamelCase_=[0.0, 0.0, 0.0] , UpperCamelCase_=0.02 , UpperCamelCase_=1e-12 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=2 , ) -> int:
"""simple docstring"""
a_ : List[str] = parent
a_ : str = batch_size
a_ : List[str] = image_size
a_ : Union[str, Any] = patch_sizes
a_ : Any = patch_stride
a_ : str = patch_padding
a_ : Union[str, Any] = is_training
a_ : Optional[Any] = use_labels
a_ : Union[str, Any] = num_labels
a_ : int = num_channels
a_ : List[str] = embed_dim
a_ : Any = num_heads
a_ : List[Any] = stride_kv
a_ : str = depth
a_ : List[str] = cls_token
a_ : Optional[int] = attention_drop_rate
a_ : Dict = initializer_range
a_ : List[str] = layer_norm_eps
def A ( self ) -> List[str]:
"""simple docstring"""
a_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ : Union[str, Any] = None
if self.use_labels:
# create a random int32 tensor of given shape
a_ : Dict = ids_tensor([self.batch_size] , self.num_labels )
a_ : List[str] = self.get_config()
return config, pixel_values, labels
def A ( self ) -> Any:
"""simple docstring"""
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def A ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]:
"""simple docstring"""
a_ : int = TFCvtModel(config=UpperCamelCase_ )
a_ : Any = model(UpperCamelCase_ , training=UpperCamelCase_ )
a_ : Optional[int] = (self.image_size, self.image_size)
a_ , a_ : Optional[int] = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
a_ : str = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
a_ : str = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def A ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
"""simple docstring"""
a_ : Dict = self.num_labels
a_ : List[str] = TFCvtForImageClassification(UpperCamelCase_ )
a_ : Tuple = model(UpperCamelCase_ , labels=UpperCamelCase_ , training=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self ) -> Union[str, Any]:
"""simple docstring"""
a_ : Optional[int] = self.prepare_config_and_inputs()
a_ , a_ , a_ : Tuple = config_and_inputs
a_ : Tuple = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class snake_case__ ( __A , __A , unittest.TestCase ):
UpperCAmelCase : Tuple = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
UpperCAmelCase : Optional[Any] = (
{"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification}
if is_tf_available()
else {}
)
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : List[str] = False
UpperCAmelCase : Union[str, Any] = False
UpperCAmelCase : List[Any] = False
UpperCAmelCase : List[Any] = False
def A ( self ) -> Optional[Any]:
"""simple docstring"""
a_ : Any = TFCvtModelTester(self )
a_ : Dict = TFCvtConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
def A ( self ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def A ( self ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def A ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def A ( self ) -> int:
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
def A ( self ) -> List[str]:
"""simple docstring"""
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def A ( self ) -> Tuple:
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
def A ( self ) -> Optional[Any]:
"""simple docstring"""
a_ : List[Any] = tf.keras.mixed_precision.Policy("""mixed_float16""" )
tf.keras.mixed_precision.set_global_policy(UpperCamelCase_ )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
def A ( self ) -> List[str]:
"""simple docstring"""
a_ , a_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : Optional[Any] = model_class(UpperCamelCase_ )
a_ : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ : Tuple = [*signature.parameters.keys()]
a_ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def A ( self ) -> Union[str, Any]:
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
a_ : List[str] = model_class(UpperCamelCase_ )
a_ : List[str] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
a_ : str = outputs.hidden_states
a_ : int = len(self.model_tester.depth )
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
a_ , a_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : str = True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ : Optional[Any] = True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def A ( self ) -> Union[str, Any]:
"""simple docstring"""
a_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def A ( self ) -> List[str]:
"""simple docstring"""
a_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ )
@slow
def A ( self ) -> Tuple:
"""simple docstring"""
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ : int = TFCvtModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def _lowerCamelCase ( ):
"""simple docstring"""
a_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class snake_case__ ( unittest.TestCase ):
@cached_property
def A ( self ) -> List[str]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def A ( self ) -> Optional[Any]:
"""simple docstring"""
a_ : Any = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
a_ : Union[str, Any] = self.default_image_processor
a_ : Optional[Any] = prepare_img()
a_ : Optional[int] = image_processor(images=UpperCamelCase_ , return_tensors="""tf""" )
# forward pass
a_ : int = model(**UpperCamelCase_ )
# verify the logits
a_ : Tuple = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
a_ : Any = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , UpperCamelCase_ , atol=1e-4 ) )
| 419
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE : Optional[int] = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Dict = [
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 419
| 1
|
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared Euclidean norm of a vector."""
    return np.dot(vector, vector)


class SVC:
    def __init__(self, *, regularization: float = np.inf, kernel: str = "linear", gamma: float = 0.0) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        """Linear kernel: plain dot product."""
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        """RBF kernel: exp(-gamma * ||v1 - v2||^2)."""
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))
    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes
        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #       constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #       constraint: self.C >= ln >= 0
        #                and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)
        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]
        ).x
        self.optimum = l_star
        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n
    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
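    # Quick usage sketch on toy, linearly separable data (illustrative values,
    # not part of the original module):
    xs = [
        np.array([1.0, 1.0]),
        np.array([2.0, 2.0]),
        np.array([-1.0, -1.0]),
        np.array([-2.0, -2.0]),
    ]
    ys = np.array([1, 1, -1, -1])
    svc = SVC(kernel="linear", regularization=10.0)
    svc.fit(xs, ys)
    print(svc.predict(np.array([1.5, 1.5])))  # should print 1 for the positive side
    print(svc.predict(np.array([-1.5, -1.5])))  # should print -1 for the negative side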
| 712
|
"""HackerRank Even Tree: count the edges that can be removed so that every remaining component has an even number of nodes."""
from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at start, recording even-sized subtrees."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    """Run the DFS from the root; every even-sized subtree marks a removable edge."""
    dfs(1)
if __name__ == "__main__":
    number_of_nodes, number_of_edges = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
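    # Illustrative sanity check on a hypothetical 4-node star tree: only the
    # whole tree has even size, so no edge can be removed and the answer is 0.
    tree = defaultdict(list)
    for u, v in [(2, 1), (3, 1), (4, 1)]:
        tree[u].append(v)
        tree[v].append(u)
    visited, cuts = {}, []
    dfs(1)
    print(len(cuts) - 1)  # 0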
| 593
| 0
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 4, max_size=32 * 6, num_labels=4, mask_feature_size=32, ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1], ), decoder_config=DetrConfig(
                decoder_ffn_dim=128, num_queries=self.num_queries, decoder_attention_heads=2, d_model=self.mask_feature_size, ), mask_feature_size=self.mask_feature_size, fpn_feature_size=self.mask_feature_size, num_channels=self.num_channels, num_labels=self.num_labels, )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.mask_feature_size), )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(self, config, pixel_values, pixel_mask, mask_labels, class_labels):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
        comm_check_on_output(result)
        result = model(
            pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels)
        comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
def UpperCamelCase( self ):
_UpperCAmelCase = MaskFormerModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase )
def UpperCamelCase( self ):
self.config_tester.run_common_tests()
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_UpperCamelCase , **_UpperCamelCase , output_hidden_states=_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_UpperCamelCase )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def UpperCamelCase( self ):
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def UpperCamelCase( self ):
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def UpperCamelCase( self ):
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def UpperCamelCase( self ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def UpperCamelCase( self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCamelCase( self ):
pass
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_UpperCamelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
@slow
def UpperCamelCase( self ):
for model_name in ["facebook/maskformer-swin-small-coco"]:
_UpperCAmelCase = MaskFormerModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = (self.model_tester.min_size,) * 2
_UpperCAmelCase = {
'''pixel_values''': torch.randn((2, 3, *size) , device=_UpperCamelCase ),
'''mask_labels''': torch.randn((2, 10, *size) , device=_UpperCamelCase ),
'''class_labels''': torch.zeros(2 , 10 , device=_UpperCamelCase ).long(),
}
_UpperCAmelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_UpperCamelCase )
_UpperCAmelCase = model(**_UpperCamelCase )
self.assertTrue(outputs.loss is not None )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_UpperCamelCase , **_UpperCamelCase , output_hidden_states=_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_UpperCamelCase ).to(_UpperCamelCase )
_UpperCAmelCase = model(**_UpperCamelCase , output_attentions=_UpperCamelCase )
self.assertTrue(outputs.attentions is not None )
def UpperCamelCase( self ):
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_UpperCAmelCase = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.train()
_UpperCAmelCase = model(_UpperCamelCase , mask_labels=_UpperCamelCase , class_labels=_UpperCamelCase ).loss
loss.backward()
def UpperCamelCase( self ):
# only MaskFormerForInstanceSegmentation has the loss
_UpperCAmelCase = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.train()
_UpperCAmelCase = model(_UpperCamelCase , mask_labels=_UpperCamelCase , class_labels=_UpperCamelCase )
_UpperCAmelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_UpperCAmelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
_UpperCAmelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_UpperCAmelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_UpperCamelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
@cached_property
def UpperCamelCase( self ):
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def UpperCamelCase( self ):
_UpperCAmelCase = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(_UpperCamelCase )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(_UpperCamelCase , return_tensors='''pt''' ).to(_UpperCamelCase )
_UpperCAmelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCamelCase , (1, 3, 800, 1088) )
with torch.no_grad():
_UpperCAmelCase = model(**_UpperCamelCase )
_UpperCAmelCase = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(_UpperCamelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
_UpperCAmelCase = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(_UpperCamelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
_UpperCAmelCase = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(_UpperCamelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
def UpperCamelCase( self ):
_UpperCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(_UpperCamelCase )
.eval()
)
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(_UpperCamelCase , return_tensors='''pt''' ).to(_UpperCamelCase )
_UpperCAmelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCamelCase , (1, 3, 800, 1088) )
with torch.no_grad():
_UpperCAmelCase = model(**_UpperCamelCase )
# masks_queries_logits
_UpperCAmelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_UpperCAmelCase = [
[-1.3737124, -1.7724937, -1.9364233],
[-1.5977281, -1.9867939, -2.1523695],
[-1.5795398, -1.9269832, -2.093942],
]
_UpperCAmelCase = torch.tensor(_UpperCamelCase ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
# class_queries_logits
_UpperCAmelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase = torch.tensor(
[
[1.6512e00, -5.2572e00, -3.3519e00],
[3.6169e-02, -5.9025e00, -2.9313e00],
[1.0766e-04, -7.7630e00, -5.1263e00],
] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
def UpperCamelCase( self ):
_UpperCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(_UpperCamelCase )
.eval()
)
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(_UpperCamelCase , return_tensors='''pt''' ).to(_UpperCamelCase )
_UpperCAmelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCamelCase , (1, 3, 800, 1088) )
with torch.no_grad():
_UpperCAmelCase = model(**_UpperCamelCase )
# masks_queries_logits
_UpperCAmelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_UpperCAmelCase = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
_UpperCAmelCase = torch.tensor(_UpperCamelCase ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
# class_queries_logits
_UpperCAmelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
def UpperCamelCase( self ):
_UpperCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(_UpperCamelCase )
.eval()
)
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , )
_UpperCAmelCase = inputs['''pixel_values'''].to(_UpperCamelCase )
_UpperCAmelCase = [el.to(_UpperCamelCase ) for el in inputs['''mask_labels''']]
_UpperCAmelCase = [el.to(_UpperCamelCase ) for el in inputs['''class_labels''']]
with torch.no_grad():
_UpperCAmelCase = model(**_UpperCamelCase )
self.assertTrue(outputs.loss is not None )
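# For context, a minimal inference sketch for turning MaskFormer outputs into a
# single semantic map, relying on the image processor's
# post_process_semantic_segmentation helper (illustrative, not part of the tests):
#
#     processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
#     model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
#     image = prepare_img()
#     inputs = processor(images=image, return_tensors="pt")
#     with torch.no_grad():
#         outputs = model(**inputs)
#     semantic_map = processor.post_process_semantic_segmentation(
#         outputs, target_sizes=[image.size[::-1]]
#     )[0]  # (height, width) tensor of class ids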
| 32
|
"""In-place quicksort on a numpy array that also counts the number of comparisons made."""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """Sort a[start:end + 1] in place and return the comparison count."""
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    """Lomuto-style partition around a random pivot; return (pivot index, comparison count)."""
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    "No of Comparisons for 100 elements selected from a standard normal distribution "
    "is :"
)
print(z)
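# Rough sanity check (illustrative): randomized quicksort makes about 2 n ln n
# comparisons in expectation, so for n = 100 the count printed above should be
# in the high hundreds.
print("Expected ~2 n ln n =", int(2 * p * np.log(p)), "comparisons")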
| 48
| 0
|
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images
        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_sde_ve_pipeline(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)
        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 181
|
import math
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers (Project Euler problem 6)."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
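    # Cross-check with the standard closed forms:
    # sum 1..n = n(n+1)/2 and sum of squares = n(n+1)(2n+1)/6.
    n = 100
    assert solution(n) == (n * (n + 1) // 2) ** 2 - n * (n + 1) * (2 * n + 1) // 6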
| 181
| 1
|
""" Chrf(++) metric as available in sacrebleu. """
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
a = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
a = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
a = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ,id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" ,id="""sequence""" ) ,id="""references""" ),
} ) ,codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] ,reference_urls=[
"""https://github.com/m-popovic/chrF""",
] ,)
    def _compute(self, predictions, references, char_order: int = CHRF.CHAR_ORDER, word_order: int = CHRF.WORD_ORDER, beta: int = CHRF.BETA, lowercase: bool = False, whitespace: bool = False, eps_smoothing: bool = False, ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
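# Usage sketch: the wrapper above delegates to sacrebleu's CHRF, which can also
# be called directly (illustrative):
#
#     from sacrebleu import CHRF
#
#     chrf = CHRF(word_order=2)  # word n-grams enabled -> chrF++
#     score = chrf.corpus_score(
#         ["The cat sat on the mat."], [["The cat sat on a mat."]]
#     )
#     print(score.score)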
| 109
|
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by counting characters in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two parent strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    """With probability MUTATION_PROBABILITY, replace one random gene of the child."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)
def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str], ) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))
    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
lowerCAmelCase__: Optional[Any] = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCAmelCase__: List[Any] = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__: Dict = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
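    # Quick convergence sketch on a much shorter target (generation and
    # population counts vary from run to run; illustrative only):
    quick_generation, quick_total, quick_best = basic("HELLO WORLD", genes_list, debug=False)
    print(f"\nQuick check -> generations: {quick_generation}, evaluated: {quick_total}, result: {quick_best}")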
| 345
| 0
|
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False, ):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset, )
    else:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset, )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder, model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ), output_path=output_path / "vae_decoder" / "model.onnx", ordered_input_names=["latent_sample", "return_dict"], output_names=["sample"], dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        }, opset=opset, )
    del vae_decoder
if __name__ == "__main__":
UpperCamelCase_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=14,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
UpperCamelCase_ : Optional[Any] = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print('''SD: Done: ONNX''')
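    # Sanity-check sketch for the exported graph with onnxruntime (assumes the
    # usual 4 latent channels of a Stable Diffusion VAE; illustrative only):
    #
    #     import numpy as np
    #     import onnxruntime as ort
    #
    #     sess = ort.InferenceSession(str(Path(args.output_path) / "vae_decoder" / "model.onnx"))
    #     latents = np.random.randn(1, 4, 25, 25).astype(np.float32)
    #     (sample,) = sess.run(None, {"latent_sample": latents})
    #     print(sample.shape)  # the decoder upsamples 8x, e.g. (1, 3, 200, 200)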
| 482
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False, hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2], focal_windows=[3, 3, 3, 3], hidden_act="gelu", mlp_ratio=4.0, hidden_dropout_prob=0.0, drop_path_rate=0.1, use_layerscale=False, layerscale_value=1e-4, use_post_layernorm=False, use_post_layernorm_in_modulation=False, normalize_modulator=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
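# Usage sketch (illustrative):
#
#     config = FocalNetConfig(embed_dim=96, depths=[2, 2, 6, 2])
#     config.model_type   # "focalnet"
#     config.stage_names  # ["stem", "stage1", "stage2", "stage3", "stage4"]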
| 482
| 1
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 37
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: int = 0.9, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, rescale_factor: Union[int, float] = 1 / 255, do_rescale: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], crop_pct: Optional[float] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(F"""size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""")
        if crop_pct is not None:
            # Scale the requested size up by 1/crop_pct so that the later center
            # crop recovers exactly the requested output size.
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))
            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"""size must contain 'height' and 'width' as keys. Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
def A_ ( self : str , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : int = None , UpperCAmelCase : PILImageResampling = None , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase : Any , ) -> PIL.Image.Image:
lowerCamelCase__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
lowerCamelCase__ : Any = crop_pct if crop_pct is not None else self.crop_pct
lowerCamelCase__ : Any = resample if resample is not None else self.resample
lowerCamelCase__ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase__ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase__ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase__ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase__ : Any = image_mean if image_mean is not None else self.image_mean
lowerCamelCase__ : str = image_std if image_std is not None else self.image_std
lowerCamelCase__ : Optional[Any] = size if size is not None else self.size
lowerCamelCase__ : Optional[Any] = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
lowerCamelCase__ : List[str] = crop_size if crop_size is not None else self.crop_size
lowerCamelCase__ : int = get_size_dict(UpperCAmelCase , param_name='crop_size' )
lowerCamelCase__ : Any = make_list_of_images(UpperCAmelCase )
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
if do_center_crop and crop_pct is None:
raise ValueError('Crop_pct must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 295
| 0
|
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    # element-wise logistic sigmoid
    return 1 / (1 + np.exp(-vector))
def sigmoid_linear_unit(vector: np.array) -> np.array:
    # quick-GELU approximation: x * sigmoid(1.702 * x)
    return vector * sigmoid(1.702 * vector)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 713
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/pix2struct-textcaps-base': (
'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
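    # attribute_map lets generic code read config.hidden_size / num_attention_heads even though
    # the underlying fields use T5-style names (num_layers, num_heads)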
    def __init__(self, vocab_size=50244, hidden_size=768, d_kv=64, d_ff=2048, num_layers=12, num_heads=12, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, dense_act_fn="gelu_new", decoder_start_token_id=0, use_cache=False, pad_token_id=0, eos_token_id=1, tie_word_embeddings=False, is_decoder=True, **kwargs) -> None:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, tie_word_embeddings=tie_word_embeddings, is_decoder=is_decoder, **kwargs)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"
    def __init__(self, hidden_size=768, patch_embed_hidden_size=768, d_ff=2048, d_kv=64, num_hidden_layers=12, num_attention_heads=12, dense_act_fn="gelu_new", layer_norm_eps=1e-6, dropout_rate=0.0, attention_dropout=0.0, initializer_range=1e-10, initializer_factor=1.0, seq_len=4096, relative_attention_num_buckets=32, relative_attention_max_distance=128, **kwargs) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True
    def __init__(self, text_config=None, vision_config=None, initializer_factor=1.0, initializer_range=0.02, is_vqa=False, tie_word_embeddings=False, is_encoder_decoder=True, **kwargs) -> None:
        """simple docstring"""
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")
        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa
    @classmethod
    def from_text_vision_configs(cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs) -> "Pix2StructConfig":
        """simple docstring"""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict(self) -> dict:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 59
| 0
|
from ..utils import DummyObject, requires_backends
# NOTE: the original pipeline class names were stripped from this dump and cannot be
# recovered; distinct placeholder names are used below so the module stays importable.
class FlaxTransformersDummyObject1(metaclass=DummyObject):
    _backends = ["flax", "transformers"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
class FlaxTransformersDummyObject2(metaclass=DummyObject):
    _backends = ["flax", "transformers"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
class FlaxTransformersDummyObject3(metaclass=DummyObject):
    _backends = ["flax", "transformers"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
class FlaxTransformersDummyObject4(metaclass=DummyObject):
    _backends = ["flax", "transformers"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
| 43
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
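# Conversion script: copies timm LeViT weights into the Hugging Face layout and
# verifies both models produce the same logits on a random batch.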
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True) -> None:
    """simple docstring"""
    print(f"Converting {name}...")
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits
    assert torch.allclose(out1, out2), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name)
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """simple docstring"""
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }
    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1),
    }
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""levit-dump-folder/""",
type=Path,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
args = parser.parse_args()
pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 104
| 0
|
'''simple docstring'''
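# Ideal-gas-law helpers built around PV = nRT with R ~= 0.0821 L*atm/(mol*K);
# e.g. moles_to_pressure(volume=0.82, moles=3, temperature=300) -> 90 (atm, rounded).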
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    return round(float(moles / volume) * nfactor)
def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (volume)))
def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (pressure)))
def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 174
|
'''simple docstring'''
import argparse
import os
import re
import packaging.version
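# Release helper: rewrites the version string in setup.py, src/diffusers/__init__.py and
# every example script, either bumping to the next release or back to a .dev0 version.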
PATH_TO_EXAMPLES = 'examples/'
REPLACE_PATTERNS = {
'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'),
'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
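# Each entry pairs a regex that locates the current version string with a template
# in which the literal VERSION is substituted.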
REPLACE_FILES = {
'init': 'src/diffusers/__init__.py',
'setup': 'setup.py',
}
README_FILE = 'README.md'
def update_version_in_file(fname: str, version: str, pattern: str) -> None:
    with open(fname, 'r', encoding='utf-8', newline='\n') as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('VERSION', version)
    code = re_pattern.sub(replace, code)
    with open(fname, 'w', encoding='utf-8', newline='\n') as f:
        f.write(code)
def update_version_in_examples(version: str) -> None:
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('research_projects')
        if "legacy" in directories:
            directories.remove('legacy')
        for fname in fnames:
            if fname.endswith('.py'):
                update_version_in_file(os.path.join(folder, fname), version, pattern='examples')
def global_version_update(version: str, patch: bool = False) -> None:
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list() -> None:
    _start_prompt = '🤗 Transformers currently provides the following architectures'
    _end_prompt = '1. Want to contribute a new model?'
    with open(README_FILE, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith('1.'):
            lines[index] = lines[index].replace(
                'https://huggingface.co/docs/diffusers/main/model_doc', 'https://huggingface.co/docs/diffusers/model_doc')
        index += 1
    with open(README_FILE, 'w', encoding='utf-8', newline='\n') as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    with open(REPLACE_FILES['init'], 'r') as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['init'][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch: bool = False) -> None:
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!')
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
    else:
        default_version = f'{default_version.major}.{default_version.minor + 1}.0'
    # Now let's ask nicely if that's the right one.
    version = input(f'Which version are you releasing? [{default_version}]')
    if len(version) == 0:
        version = default_version
    print(f'Updating version to {version}.')
    global_version_update(version, patch=patch)
def post_release_work() -> None:
    current_version = get_version()
    dev_version = f'{current_version.major}.{current_version.minor + 1}.0.dev0'
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f'Which version are we developing now? [{dev_version}]')
    if len(version) == 0:
        version = dev_version
    print(f'Updating version to {version}.')
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
__snake_case : int = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
__snake_case : Optional[Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 174
| 1
|
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
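# The decorators below tag methods with the keys they handle; the KeyHandler metaclass
# then collects those tags into a per-class dispatch table.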
def mark(key):
    """simple docstring"""
    def decorator(func):
        handle = getattr(func, 'handle_key', [])
        handle += [key]
        setattr(func, 'handle_key', handle)
        return func
    return decorator
def mark_multiple(*keys):
    """simple docstring"""
    def decorator(func):
        handle = getattr(func, 'handle_key', [])
        handle += keys
        setattr(func, 'handle_key', handle)
        return func
    return decorator
class KeyHandler(type):
    """simple docstring"""
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, 'key_handler'):
            setattr(new_cls, 'key_handler', {})
        setattr(new_cls, 'handle_input', KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, 'handle_key', [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character if it exists in the handler."""
        char = get_character()
        if char != KEYMAP['undefined']:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def register(cls):
    """Adds the KeyHandler metaclass to the class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
| 41
|
'''simple docstring'''
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey
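# Naive nearest-neighbour rescaling: every destination pixel samples the source pixel
# at the proportionally scaled coordinate.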
class NearestNeighbour:
    '''simple docstring'''
    def __init__(self, img, dst_width: int, dst_height: int):
        """simple docstring"""
        if dst_width < 0 or dst_height < 0:
            raise ValueError('Destination width/height should be > 0')
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        self.output = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )
    def process(self):
        """simple docstring"""
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]
    def get_x(self, x: int) -> int:
        """simple docstring"""
        return int(self.ratio_x * x)
    def get_y(self, y: int) -> int:
        """simple docstring"""
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread('image_data/lena.jpg', 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
| 365
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
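# The import structure is built lazily so heavy backends (torch/tf/flax) are only
# imported when their objects are actually requested.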
_import_structure = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 703
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
A__: List[str] = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self) -> None:
        '''simple docstring'''
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 506
| 0
|
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
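# The integration tests below compare a deterministic slice of the generated image
# against reference pixel values recorded for each scheduler.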
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    '''simple docstring'''
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler('sample_euler')
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler('sample_euler')
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1
    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler('sample_dpmpp_2m')
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=15, output_type='np', use_karras_sigmas=True)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 27
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
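# SEW ("squeezed and efficient wav2vec") reuses most of the wav2vec2-style configuration surface.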
class SEWConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "sew"
    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, squeeze_factor=2, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs) -> None:
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. '
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
                f'but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) '
                f'= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.')
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 120
| 0
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"
    def __init__(self, vocab_size=None, mask_token_id=None, pad_token_id=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1026, initializer_range=0.02, layer_norm_eps=1e-12, position_embedding_type="absolute", use_cache=True, emb_layer_norm_before=None, token_dropout=False, is_folding_model=False, esmfold_config=None, vocab_list=None, **kwargs):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")
    def to_dict(self):
        """simple docstring"""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None
    def __post_init__(self):
        """simple docstring"""
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)
    def to_dict(self):
        """simple docstring"""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None
    def __post_init__(self):
        """simple docstring"""
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}.")
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}.")
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.")
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.")
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")
    def to_dict(self):
        """simple docstring"""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5
    def to_dict(self):
        """simple docstring"""
        return asdict(self)
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 706
|
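# Project Euler problem 10: sum all primes below two million with a sieve of Eratosthenes.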
def solution(n: int = 2000000) -> int:
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(F"""{solution() = }""")
| 306
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
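# Circular singly linked list: the tail's next pointer always wraps back to the head.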
class Node:
    """simple docstring"""
    def __init__(self, data: Any) -> None:
        """simple docstring"""
        self.data = data
        self.next: Node | None = None
class CircularLinkedList:
    """simple docstring"""
    def __init__(self) -> None:
        """simple docstring"""
        self.head = None
        self.tail = None
    def __iter__(self) -> Iterator[Any]:
        """simple docstring"""
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break
    def __len__(self) -> int:
        """simple docstring"""
        return sum(1 for _ in self)
    def __repr__(self) -> str:
        """simple docstring"""
        return "->".join(str(item) for item in iter(self))
    def insert_tail(self, data: Any) -> None:
        """simple docstring"""
        self.insert_nth(len(self), data)
    def insert_head(self, data: Any) -> None:
        """simple docstring"""
        self.insert_nth(0, data)
    def insert_nth(self, index: int, data: Any) -> None:
        """simple docstring"""
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node
    def delete_front(self) -> Any:
        """simple docstring"""
        return self.delete_nth(0)
    def delete_tail(self) -> Any:
        """simple docstring"""
        return self.delete_nth(len(self) - 1)
    def delete_nth(self, index: int = 0) -> Any:
        """simple docstring"""
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data
    def is_empty(self) -> bool:
        """simple docstring"""
        return len(self) == 0
def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""
    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3
    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 355
|
"""simple docstring"""
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
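# hubconf-style entry points: each helper below simply forwards to the matching
# Auto* class's from_pretrained.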
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
| 355
| 1
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
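# These tests download dataset/metric metadata from the Hugging Face Hub,
# hence the module-level integration marker.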
@pytest.mark.parametrize("path" , ["paws", "csv"] )
def test_inspect_dataset(path, tmp_path):
    '''simple docstring'''
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" , ["accuracy"] )
def test_inspect_metric(path, tmp_path):
    '''simple docstring'''
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"path, config_name, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    '''simple docstring'''
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    '''simple docstring'''
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"path, expected" , [
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] , )
def test_get_dataset_config_names(path, expected):
    '''simple docstring'''
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" , [
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] , )
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    '''simple docstring'''
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def test_get_dataset_split_names(path, expected_config, expected_splits):
    '''simple docstring'''
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    '''simple docstring'''
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 582
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp(self) -> None:
        """simple docstring"""
        super().setUp()
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs) -> LEDTokenizer:
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs) -> LEDTokenizerFast:
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer(self) -> LEDTokenizer:
        """simple docstring"""
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")
    @cached_property
    def default_tokenizer_fast(self) -> LEDTokenizerFast:
        """simple docstring"""
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    @require_torch
    def test_prepare_batch(self) -> None:
        """simple docstring"""
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
    @require_torch
    def test_prepare_batch_empty_target_text(self) -> None:
        """simple docstring"""
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)
    @require_torch
    def test_tokenizer_as_target_length(self) -> None:
        """simple docstring"""
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])
    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self) -> None:
        """simple docstring"""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))
    @require_torch
    def test_special_tokens(self) -> None:
        """simple docstring"""
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
@require_torch
def UpperCAmelCase ( self) -> List[str]:
"""simple docstring"""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text , padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"] , expected_global_attention_mask)
def UpperCAmelCase ( self) -> Optional[int]:
"""simple docstring"""
pass
def UpperCAmelCase ( self) -> List[Any]:
"""simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True)
                self.assertEqual(sum(tokens_r["token_type_ids"]) , sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]) , sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
                self.assertSequenceEqual(
                    tokens_r_str , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
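# --- Added illustration (not part of the original test file): a minimal sketch
# of the `global_attention_mask` padding behaviour asserted above. Assumes
# network access to download "allenai/led-base-16384".
#
#   tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#   enc = tok(["Summary of the text.", "Another summary."], padding=False)
#   enc["global_attention_mask"] = [[0] * len(ids) for ids in enc["input_ids"]]
#   padded = tok.pad(enc)
#   # `pad` extends every row to the batch maximum, filling the new
#   # global-attention slots with -1, as the last test checks.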
| 582
| 1
|
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
}
a = {
'''vocab_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'''},
'''merges_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'''},
}
a = {
'''ctrl''': 256,
}
a = {
'''Pregnancy''': 168_629,
'''Christianity''': 7_675,
'''Explain''': 106_423,
'''Fitness''': 63_440,
'''Saving''': 63_163,
'''Ask''': 27_171,
'''Ass''': 95_985,
'''Joke''': 163_509,
'''Questions''': 45_622,
'''Thoughts''': 49_605,
'''Retail''': 52_342,
'''Feminism''': 164_338,
'''Writing''': 11_992,
'''Atheism''': 192_263,
'''Netflix''': 48_616,
'''Computing''': 39_639,
'''Opinion''': 43_213,
'''Alone''': 44_967,
'''Funny''': 58_917,
'''Gaming''': 40_358,
'''Human''': 4_088,
'''India''': 1_331,
'''Joker''': 77_138,
'''Diet''': 36_206,
'''Legal''': 11_859,
'''Norman''': 4_939,
'''Tip''': 72_689,
'''Weight''': 52_343,
'''Movies''': 46_273,
'''Running''': 23_425,
'''Science''': 2_090,
'''Horror''': 37_793,
'''Confession''': 60_572,
'''Finance''': 12_250,
'''Politics''': 16_360,
'''Scary''': 191_985,
'''Support''': 12_654,
'''Technologies''': 32_516,
'''Teenage''': 66_160,
'''Event''': 32_769,
'''Learned''': 67_460,
'''Notion''': 182_770,
'''Wikipedia''': 37_583,
'''Books''': 6_665,
'''Extract''': 76_050,
'''Confessions''': 102_701,
'''Conspiracy''': 75_932,
'''Links''': 63_674,
'''Narcissus''': 150_425,
'''Relationship''': 54_766,
'''Relationships''': 134_796,
'''Reviews''': 41_671,
'''News''': 4_256,
'''Translation''': 26_820,
'''multilingual''': 128_406,
}
def get_pairs(word ) -> Any:
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
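# Added example: for the symbol tuple ("h", "e", "l", "l", "o"), get_pairs
# returns {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")} -- the adjacent
# pairs whose ranks drive the BPE merge loop below.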
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : int = VOCAB_FILES_NAMES
UpperCAmelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase : Optional[int] = CONTROL_CODES
    def __init__( self , vocab_file , merges_file , unk_token="<unk>" , **kwargs ):
        super().__init__(unk_token=unk_token , **kwargs )
        with open(vocab_file , encoding='utf-8' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file , encoding='utf-8' ) as merges_handle:
            merges = merges_handle.read().split('\n' )[1:-1]
        merges = [tuple(merge.split() ) for merge in merges]
        self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
        self.cache = {}
@property
def lowerCAmelCase_ ( self : Dict ):
return len(self.encoder )
def lowerCAmelCase_ ( self : Any ):
return dict(self.encoder , **self.added_tokens_encoder )
    def lowerCAmelCase_ ( self , token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        word = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j

                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = '@@ '.join(word )
        word = word[:-4]
        self.cache[token] = word
        return word
    def lowerCAmelCase_ ( self , text ):
        split_tokens = []
        words = re.findall(r'\S+\n?' , text )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(' ' ) ) )
        return split_tokens
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Dict ):
return self.encoder.get(_UpperCAmelCase , self.encoder.get(self.unk_token ) )
def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Tuple ):
return self.decoder.get(_UpperCAmelCase , self.unk_token )
    def lowerCAmelCase_ ( self , tokens ):
        out_string = ' '.join(tokens ).replace('@@ ' , '' ).strip()
        return out_string
    def lowerCAmelCase_ ( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(vocab_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' )
        index = 0
        with open(merge_file , 'w' , encoding='utf-8' ) as writer:
            writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
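# --- Added sketch (not part of the original file): the ranked-merge loop that
# the `bpe` method above implements, in miniature. Names are illustrative.
def _demo_bpe(word, ranks):
    symbols = list(word)
    while len(symbols) > 1:
        candidates = {(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)}
        best = min(candidates, key=lambda pair: ranks.get(pair, float("inf")))
        if best not in ranks:  # no learned merge applies any more
            break
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == best:
                merged.append(symbols[i] + symbols[i + 1])
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return symbols

# _demo_bpe("hello", {("l", "l"): 0})  ->  ['h', 'e', 'll', 'o']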
| 7
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : int = '''gpt_bigcode'''
UpperCAmelCase : str = ['''past_key_values''']
UpperCAmelCase : Dict = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
    def __init__( self , vocab_size=50_257 , n_positions=1_024 , n_embd=768 , n_layer=12 , n_head=12 , n_inner=None , activation_function="gelu_pytorch_tanh" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , attention_softmax_in_fp32=True , scale_attention_softmax_in_fp32=True , multi_query=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
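# --- Added usage sketch: constructing the config above with overrides and
# reading a value through `attribute_map`. The class name below is the
# obfuscated one used in this file.
#
#   config = lowercase_(n_embd=256 , n_layer=4 , n_head=8 )
#   assert config.hidden_size == 256 and config.model_type == "gpt_bigcode"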
| 7
| 1
|
_snake_case = """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str ) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2 , num1 )
            operand_stack.push(total )
    # RULE 5
    return operand_stack.peek()
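# Added worked example: for "(5 + ((4 * 2) * (2 + 3)))" the innermost groups
# reduce first -- (4 * 2) -> 8 and (2 + 3) -> 5 -- then (8 * 5) -> 40 and
# finally (5 + 40) -> 45, which is what the top of the operand stack holds.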
if __name__ == "__main__":
_snake_case = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 721
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
_snake_case = logging.getLogger(__name__)
_snake_case = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
_snake_case = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase :
__lowerCamelCase = field(
default=lowercase_ , metadata={
'help': (
            'The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'
)
} , )
__lowerCamelCase = field(
default=lowercase_ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(lowercase_ )} , )
__lowerCamelCase = field(
default=lowercase_ , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
__lowerCamelCase = field(
default=lowercase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__lowerCamelCase = field(
default=lowercase_ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__lowerCamelCase = field(
default=lowercase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
__lowerCamelCase = field(
default=lowercase_ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
__lowerCamelCase = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__lowerCamelCase = field(
default=lowercase_ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"--config_overrides can't be used in combination with --config_name or --model_name_or_path" )
@dataclass
class lowerCAmelCase :
__lowerCamelCase = field(
default=lowercase_ , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
__lowerCamelCase = field(
default=lowercase_ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
__lowerCamelCase = field(default=lowercase_ , metadata={'help': 'The input training data file (a text file).'} )
__lowerCamelCase = field(
default=lowercase_ , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
__lowerCamelCase = field(
default=lowercase_ , metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'} , )
__lowerCamelCase = field(
default=lowercase_ , metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'} , )
__lowerCamelCase = field(
default=lowercase_ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
__lowerCamelCase = field(
default=5 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
__lowerCamelCase = field(
default=lowercase_ , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated. Default to the max input length of the model.'
)
} , )
__lowerCamelCase = field(
default=lowercase_ , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
__lowerCamelCase = field(
default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
__lowerCamelCase = field(
default=lowercase_ , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
if self.train_file is not None:
lowercase__ = self.train_file.split("." )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
lowercase__ = self.validation_file.split("." )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset , ref_file ):
    with open(ref_file , "r" , encoding="utf-8" ) as f:
        refs = [json.loads(line ) for line in f.read().splitlines() if (len(line ) > 0 and not line.isspace())]
    assert len(dataset ) == len(refs )
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict )
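# Added note (an assumption about the ref-file layout, matching what
# DataCollatorForWholeWordMask consumes): each line of `ref_file` should be a
# JSON list of sub-token indices that continue a word in the same-numbered
# example, e.g. "[2, 3]", so that `chinese_ref` aligns 1:1 with dataset rows.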
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
    logger.warning(
        f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
        + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , __magic_name__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=f'''train[:{data_args.validation_split_percentage}%]''' , )
            datasets["train"] = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=f'''train[{data_args.validation_split_percentage}%:]''' , )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split("." )[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension , data_files=data_files )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name , **config_kwargs )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(f'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(f'''New config: {config}''' )
    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **tokenizer_kwargs )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **tokenizer_kwargs )
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
            "You can do it from another script, save it, and load it from here, using --tokenizer_name." )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info("Training new model from scratch" )
        model = AutoModelForMaskedLM.from_config(config )

    model.resize_token_embeddings(len(tokenizer ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples ):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line ) > 0 and not line.isspace()]
        return tokenizer(examples["text"] , padding=padding , truncation=True , max_length=data_args.max_seq_length )

    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"] , data_args.train_ref_file )
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"] , data_args.validation_ref_file )
    # If we have ref files, we need to stop the trainer from removing the `chinese_ref` column
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=tokenized_datasets["train"] if training_args.do_train else None , eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , )
# Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir , "train_results.txt" )
        if trainer.is_world_process_zero():
            with open(output_train_file , "w" ) as writer:
logger.info("***** Train results *****" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"] )
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir , "eval_results_mlm_wwm.txt" )
        if trainer.is_world_process_zero():
            with open(output_eval_file , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in sorted(results.items() ):
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
return results
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
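# --- Added usage sketch (script and data file names are hypothetical) ---
#   python run_mlm_wwm.py \
#     --model_name_or_path bert-base-chinese \
#     --train_file train.txt --train_ref_file train_ref.txt \
#     --do_train --do_eval --output_dir ./mlm-wwm-out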
| 611
| 0
|
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase_ : List[str] = logging.get_logger(__name__)
lowercase_ : List[Any] = {'''vocab_file''': '''spiece.model'''}
lowercase_ : Optional[int] = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
}
}
lowercase_ : Union[str, Any] = {
'''google/bigbird-roberta-base''': 40_96,
'''google/bigbird-roberta-large''': 40_96,
'''google/bigbird-base-trivia-itc''': 40_96,
}
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = ["""input_ids""", """attention_mask"""]
A__ = []
    def __init__( self , vocab_file , unk_token="<unk>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , sep_token="[SEP]" , mask_token="[MASK]" , cls_token="[CLS]" , sp_model_kwargs = None , **kwargs , ):
        """simple docstring"""
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sep_token=sep_token , mask_token=mask_token , cls_token=cls_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
@property
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
return self.sp_model.get_piece_size()
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
return state
    def __setstate__( self , d ):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def __SCREAMING_SNAKE_CASE ( self , text ):
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
return self.sp_model.piece_to_id(snake_case__ )
    def __SCREAMING_SNAKE_CASE ( self , index ):
        """simple docstring"""
        token = self.sp_model.IdToPiece(index )
        return token
    def __SCREAMING_SNAKE_CASE ( self , tokens ):
        """simple docstring"""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def __SCREAMING_SNAKE_CASE ( self , token_ids , skip_special_tokens = False , clean_up_tokenization_spaces = None , spaces_between_special_tokens = True , **kwargs , ):
        """simple docstring"""
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer" , False )
        filtered_tokens = self.convert_ids_to_tokens(token_ids , skip_special_tokens=skip_special_tokens )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
                    current_sub_text = []
                sub_texts.append(token )
            else:
                current_sub_text.append(token )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])" , r"\1" , " ".join(sub_texts ) )
        else:
            text = "".join(sub_texts )
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text )
            return clean_text
        else:
            return text
    def __SCREAMING_SNAKE_CASE ( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def __SCREAMING_SNAKE_CASE ( self , token_ids_a , token_ids_b = None ):
        """simple docstring"""
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + token_ids_b + sep
    def __SCREAMING_SNAKE_CASE ( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]
    def __SCREAMING_SNAKE_CASE ( self , token_ids_a , token_ids_b = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
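# --- Added illustration of the three helpers above (placeholder token ids) ---
#   build_inputs_with_special_tokens([5, 6], [7])      -> [CLS, 5, 6, SEP, 7, SEP]
#   get_special_tokens_mask([5, 6], [7])               -> [1, 0, 0, 1, 0, 1]
#   create_token_type_ids_from_sequences([5, 6], [7])  -> [0, 0, 0, 0, 1, 1]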
| 572
|
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
lowercase_ : str = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
lowercase_ : Optional[int] = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
lowercase_ : Dict = '''
Calculates how good predictions are, given some references, using certain scores
Args:
    predictions: list of candidates to evaluate. Each candidate should be a list
        of strings with several code candidates to solve the problem.
    references: a list with a test for each prediction. Each test should evaluate the
        correctness of a code candidate.
    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
lowercase_ : str = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
lowercase_ : Tuple = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , )
    def __SCREAMING_SNAKE_CASE ( self , predictions , references , k=[1, 10, 100] , num_workers=4 , timeout=3.0 ):
        """simple docstring"""
        if os.getenv("HF_ALLOW_CODE_EVAL" , 0 ) != "1":
            raise ValueError(_WARNING )

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows." )

        with ThreadPoolExecutor(max_workers=num_workers ) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list )

            for task_id, (candidates, test_case) in enumerate(zip(predictions , references ) ):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness , *args )
                    futures.append(future )
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures ):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result) )

        total , correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed ) )
            correct.append(sum(passed ) )
        total = np.array(total )
        correct = np.array(correct )

        ks = k
        pass_at_k = {F'''pass@{k}''': estimate_pass_at_k(total , correct , k ).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples , num_correct , k ) -> np.ndarray:
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int , c: int , k: int ) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )

    if isinstance(num_samples , int ):
        num_samples_it = itertools.repeat(num_samples , len(num_correct ) )
    else:
        assert len(num_samples ) == len(num_correct )
        num_samples_it = iter(num_samples )

    return np.array([estimator(int(n ), int(c ), k ) for n, c in zip(num_samples_it , num_correct )] )
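# Added worked example: with n=5 samples and c=2 correct, pass@2 is
# 1 - C(3, 2)/C(5, 2) = 1 - 3/10 = 0.7; the product form above agrees:
# 1 - (1 - 2/4) * (1 - 2/5) = 1 - 0.5 * 0.6 = 0.7.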
| 572
| 1
|
from __future__ import annotations
from math import gcd
def pollard_rho(num: int , seed: int = 2 , step: int = 1 , attempts: int = 3 ) -> int | None:
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError('''The input value cannot be less than 2''' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int , step: int , modulus: int ) -> int:
        return (pow(value , 2 ) + step) % modulus

    for _ in range(attempts ):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise , step , num )
            hare = rand_fn(hare , step , num )
            hare = rand_fn(hare , step , num )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise , num )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
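# Added examples (hedged; values follow the classic f(x) = x**2 + 1, x0 = 2
# textbook run): pollard_rho(8051) should return a nontrivial factor of
# 8051 = 83 * 97, while pollard_rho(11) returns None because 11 is prime.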
if __name__ == "__main__":
import argparse
__lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
__lowerCAmelCase : Dict = parser.parse_args()
__lowerCAmelCase : Any = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F'{args.num} is probably prime')
else:
__lowerCAmelCase : Union[str, Any] = args.num // divisor
print(F'{args.num} = {divisor} * {quotient}')
| 284
|
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        """simple docstring"""

        @staticmethod
        def open( *args , **kwargs ):
            pass

    def load_image( _ ):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
__lowerCAmelCase : Dict = (
"https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
A__ : int = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def snake_case_ ( self , model , tokenizer , processor ):
        dqa_pipeline = pipeline(
            '''document-question-answering''' , model=model , tokenizer=tokenizer , image_processor=processor )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image ) , None , '''''' ) ) )
        question = '''What is the placebo?'''
        examples = [
            {
                '''image''': load_image(image ),
                '''question''': question,
            },
            {
                '''image''': image,
                '''question''': question,
            },
            {
                '''image''': image,
                '''question''': question,
                '''word_boxes''': word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def snake_case_ ( self , dqa_pipeline , examples ):
        outputs = dqa_pipeline(examples , top_k=2 )
        self.assertEqual(
            outputs , [
                [
                    {'''score''': ANY(float ), '''answer''': ANY(str ), '''start''': ANY(int ), '''end''': ANY(int )},
                    {'''score''': ANY(float ), '''answer''': ANY(str ), '''start''': ANY(int ), '''end''': ANY(int )},
                ]
            ]
            * 3 , )
@require_torch
    @require_detectron2
@require_pytesseract
    def snake_case_ ( self ):
        dqa_pipeline = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
        image = INVOICE_URL
        question = '''How many cats are there?'''

        expected_output = [
            {'''score''': 0.00_01, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
            {'''score''': 0.00_01, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
        ]
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(nested_simplify(outputs , decimals=4 ) , expected_output )

        outputs = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(nested_simplify(outputs , decimals=4 ) , expected_output )

        # No text is detected in this image, so LayoutLMv2 should return an empty answer list.
        image = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(outputs , [] )

        # We can optionally pass the words and bounding boxes directly
        image = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image , question=question , words=words , boxes=boxes , top_k=2 )
        self.assertEqual(outputs , [] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def snake_case_ ( self ):
        dqa_pipeline = pipeline(
            '''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
        image = INVOICE_URL
        question = '''What is the invoice number?'''

        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'''score''': 0.99_44, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.00_09, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )

        outputs = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'''score''': 0.99_44, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.00_09, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )

        outputs = dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {'''score''': 0.99_44, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                    {'''score''': 0.00_09, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                ],
            ]
            * 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def snake_case_ ( self ):
        dqa_pipeline = pipeline(
            '''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
        image = INVOICE_URL
        question = '''What is the invoice number?'''

        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'''score''': 0.99_74, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                {'''score''': 0.99_48, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )

        outputs = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'''score''': 0.99_74, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                {'''score''': 0.99_48, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )

        outputs = dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {'''score''': 0.99_74, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                    {'''score''': 0.99_48, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                ]
            ]
            * 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
    def snake_case_ ( self ):
        tokenizer = AutoTokenizer.from_pretrained(
            '''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=True )
        dqa_pipeline = pipeline(
            '''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=tokenizer , revision='''3dc6de3''' , )
        image = INVOICE_URL
        question = '''What is the invoice number?'''

        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'''score''': 0.42_51, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.08_19, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
            ] , )

        outputs = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'''score''': 0.42_51, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.08_19, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
            ] , )

        outputs = dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {'''score''': 0.42_51, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                    {'''score''': 0.08_19, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                ]
            ]
            * 2 , )

        word_boxes = list(zip(*apply_tesseract(load_image(image ) , None , '''''' ) ) )

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'''score''': 0.42_51, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.08_19, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
            ] , )
@slow
@require_torch
@require_pytesseract
@require_vision
    def snake_case_ ( self ):
        tokenizer = AutoTokenizer.from_pretrained(
            '''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=True )
        dqa_pipeline = pipeline(
            '''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=tokenizer , revision='''3dc6de3''' , max_seq_len=50 , )
        image = INVOICE_URL
        question = '''What is the invoice number?'''

        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'''score''': 0.99_99, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.99_98, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )

        outputs = dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {'''score''': 0.99_99, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                    {'''score''': 0.99_98, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                ]
            ]
            * 2 , )

        word_boxes = list(zip(*apply_tesseract(load_image(image ) , None , '''''' ) ) )

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'''score''': 0.99_99, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.99_98, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
@slow
@require_torch
def snake_case_ ( self : List[Any] ):
__lowercase : int = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
__lowercase : List[Any] = INVOICE_URL
__lowercase : List[Any] = '''What is the invoice number?'''
__lowercase : int = dqa_pipeline(image=_snake_case , question=_snake_case , top_k=2 )
self.assertEqual(nested_simplify(_snake_case , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def snake_case_ ( self : int ):
pass
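
# --- Added usage sketch (not part of the original test suite) ---
# A minimal sketch of calling the document-question-answering pipeline directly,
# reusing the `pipeline` helper and `INVOICE_URL` fixture from this module; it
# assumes pytesseract is installed and the checkpoint can be downloaded.
def _example_dqa_usage():
    dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa", revision="3dc6de3")
    preds = dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=2)
    # Each prediction is a dict: {"score": float, "answer": str, "start": int, "end": int}
    return preds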
| 284
| 1
|
import json
import os
from datetime import date
from pathlib import Path

from tabulate import DataRow, TableFormat, tabulate


hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)


failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]

total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f'{line["duration"]:.4f}'
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_files2failed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            files2failed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in files2failed:
                    files2failed[data[0]] = [data[1:]]
                else:
                    files2failed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(files2failed[file])])

            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_files2failed.append(files2failed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)

if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
    date_report = {
        "type": "context",
        "elements": [
            {
                "type": "plain_text",
                "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
            }
        ],
    }
    payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    for failed_file in all_files2failed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""
            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }
            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
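
# --- Added usage sketch (not part of the original script) ---
# Demonstrates the pipe-delimited, Slack-friendly table that the custom
# `hf_table_format` above renders; the sample rows are made up for illustration.
def _example_table():
    rows = [["test_foo.py", 2], ["test_bar.py", 1]]
    return tabulate(rows, headers=["Test Location", "Num Failed"], tablefmt=hf_table_format, stralign="right")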
| 26
|
import inspect
import unittest

import torch
import torch.nn as nn

from accelerate.hooks import (
    AlignDevicesHook,
    ModelHook,
    SequentialHook,
    add_hook_to_module,
    attach_align_device_hook,
    remove_hook_from_module,
    remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1


class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)
        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)

    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))

    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
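
# --- Added usage sketch (not part of the original test file) ---
# A minimal custom hook combining the pre/post patterns exercised above: it
# shifts the input before the forward pass and doubles the output afterwards.
class ShiftAndScaleHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs

    def post_forward(self, module, output):
        return output * 2


def _example_custom_hook():
    model = ModelForTest()
    add_hook_to_module(model, ShiftAndScaleHook())
    x = torch.randn(2, 3)
    return model(x)  # equals 2 * unhooked_model(x + 1)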
| 26
| 1
|
import gc
import unittest

import numpy as np
import torch
from torch.backends.cuda import sdp_kernel

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
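
# --- Added usage sketch (not part of the original test file) ---
# One-step sampling with the public checkpoint used by the slow tests above;
# assumes a CUDA device and network access, and the output filename is made up.
def _example_consistency_sampling():
    unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
    scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
    pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler).to("cuda")
    image = pipe(num_inference_steps=1, output_type="pil").images[0]
    image.save("cd_onestep_sample.png")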
| 287
|
class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind(object):
        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)

                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
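
# --- Added usage example (not part of the original module) ---
# Builds a small weighted cycle and runs Boruvka's algorithm; the weight-4
# edge (4, 1) is the only one dropped, giving an MST of total weight 6.
def _example_boruvka():
    g = Graph.build(vertices=[1, 2, 3, 4], edges=[(1, 2, 1), (2, 3, 2), (3, 4, 3), (4, 1, 4)])
    print(Graph.boruvka_mst(g))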
| 287
| 1
|
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
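
# --- Added usage sketch (not part of the original module) ---
# Standalone instantiation of this config via the public package (inside the
# repo this module is imported relatively); values mirror the defaults above.
def _example_vit_config():
    from transformers import ViTConfig, ViTModel

    config = ViTConfig(image_size=224, patch_size=16, hidden_size=768)
    model = ViTModel(config)  # randomly initialized; (224 // 16) ** 2 + 1 = 197 sequence positions
    return model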
| 313
|
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
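
# --- Added usage sketch (not part of the original module) ---
# How a slow/fast tokenizer pair like this is typically registered with the
# Auto classes; `CustomConfig` is a hypothetical stand-in config class here.
def _example_register_custom_tokenizer():
    from transformers import AutoTokenizer, PretrainedConfig

    class CustomConfig(PretrainedConfig):
        model_type = "custom"

    AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast)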
| 313
| 1
|
def greatest_common_divisor(x: int, y: int) -> int:
    """Euclidean GCD algorithm."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Least common multiple of x and y."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Smallest positive number evenly divisible by all numbers from 1 to n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
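
# --- Added worked example (not part of the original solution file) ---
# Sanity checks for the helpers above; 2520 is the well-known answer for n=10.
def _example_checks():
    assert greatest_common_divisor(12, 18) == 6
    assert lcm(4, 6) == 12
    assert solution(10) == 2520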
if __name__ == "__main__":
    print(f"{solution() = }")
| 706
|
def fizz_buzz(number: int, iterations: int) -> str:
    """Play FizzBuzz from `number` up to and including `iterations`."""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out
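
# --- Added usage example (not part of the original module) ---
# Each round appends its token plus a trailing space, hence the final " ".
def _example_fizz_buzz():
    assert fizz_buzz(1, 7) == "1 2 Fizz 4 Buzz Fizz 7 "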
if __name__ == "__main__":
import doctest
doctest.testmod()
| 46
| 0
|
import tempfile
import unittest

import numpy as np

from diffusers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionPipeline,
    PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]

        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding="max_length",
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        # using the PNDM scheduler by default
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"

        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6

    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
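
# --- Added usage sketch (not part of the original test file) ---
# Minimal CPU inference with the ONNX pipeline, mirroring the fast tests above;
# assumes the onnxruntime package and network access to the tiny checkpoint.
def _example_onnx_sd_usage():
    pipe = OnnxStableDiffusionPipeline.from_pretrained(
        "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline", provider="CPUExecutionProvider"
    )
    generator = np.random.RandomState(0)
    image = pipe("a red apple", num_inference_steps=2, generator=generator, output_type="np").images[0]
    return image  # numpy array of shape (128, 128, 3) for this tiny checkpoint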
| 27
|
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class __A :
'''simple docstring'''
def __init__( self , _snake_case , _snake_case=99 , _snake_case=13 , _snake_case=7 , _snake_case=9 , _snake_case=True , _snake_case=True , _snake_case=False , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case=8 , _snake_case=0.1 , _snake_case=0.002 , _snake_case=1 , _snake_case=0 , _snake_case=0 , _snake_case=None , _snake_case=None , ):
_lowerCAmelCase : Tuple = parent
_lowerCAmelCase : List[str] = batch_size
_lowerCAmelCase : str = encoder_seq_length
_lowerCAmelCase : List[Any] = decoder_seq_length
# For common tests
_lowerCAmelCase : Dict = self.decoder_seq_length
_lowerCAmelCase : int = is_training
_lowerCAmelCase : List[Any] = use_attention_mask
_lowerCAmelCase : str = use_labels
_lowerCAmelCase : Union[str, Any] = vocab_size
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : Union[str, Any] = num_hidden_layers
_lowerCAmelCase : Tuple = num_attention_heads
_lowerCAmelCase : Tuple = d_ff
_lowerCAmelCase : int = relative_attention_num_buckets
_lowerCAmelCase : str = dropout_rate
_lowerCAmelCase : Optional[int] = initializer_factor
_lowerCAmelCase : str = eos_token_id
_lowerCAmelCase : Tuple = pad_token_id
_lowerCAmelCase : Union[str, Any] = decoder_start_token_id
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Optional[int] = decoder_layers
def SCREAMING_SNAKE_CASE__ ( self ):
return TaConfig.from_pretrained("google/umt5-base" )
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case , _snake_case=None , _snake_case=None , _snake_case=None , _snake_case=None , _snake_case=None , ):
if attention_mask is None:
_lowerCAmelCase : Tuple = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_lowerCAmelCase : Optional[Any] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_lowerCAmelCase : int = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=_snake_case )
if decoder_head_mask is None:
_lowerCAmelCase : List[Any] = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=_snake_case )
if cross_attn_head_mask is None:
_lowerCAmelCase : Dict = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=_snake_case )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
_lowerCAmelCase : Tuple = input_ids.clamp(self.pad_token_id + 1 )
_lowerCAmelCase : int = decoder_input_ids.clamp(self.pad_token_id + 1 )
_lowerCAmelCase : Dict = self.get_config()
_lowerCAmelCase : Optional[int] = config.num_attention_heads
_lowerCAmelCase : Any = self.prepare_inputs_dict(_snake_case , _snake_case , _snake_case )
return config, input_dict
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self ):
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def SCREAMING_SNAKE_CASE__ ( self ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
_lowerCAmelCase : Optional[int] = UMTaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
_lowerCAmelCase : int = model(
input_ids=_snake_case , decoder_input_ids=_snake_case , attention_mask=_snake_case , decoder_attention_mask=_snake_case , )
_lowerCAmelCase : Union[str, Any] = model(input_ids=_snake_case , decoder_input_ids=_snake_case )
_lowerCAmelCase : Optional[int] = result.last_hidden_state
_lowerCAmelCase : Tuple = result.past_key_values
_lowerCAmelCase : Optional[Any] = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(_snake_case ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
_lowerCAmelCase : int = UMTaModel(config=_snake_case ).get_decoder().to(_snake_case ).eval()
# first forward pass
_lowerCAmelCase : Optional[Any] = model(_snake_case , use_cache=_snake_case )
_lowerCAmelCase : Union[str, Any] = model(_snake_case )
_lowerCAmelCase : Union[str, Any] = model(_snake_case , use_cache=_snake_case )
self.parent.assertTrue(len(_snake_case ) == len(_snake_case ) )
self.parent.assertTrue(len(_snake_case ) == len(_snake_case ) + 1 )
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = outputs.to_tuple()
# create hypothetical next token and extend it to next_input_ids
_lowerCAmelCase : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append the sampled token to input_ids
_lowerCAmelCase : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCAmelCase : Optional[Any] = model(_snake_case )["last_hidden_state"]
_lowerCAmelCase : int = model(_snake_case , past_key_values=_snake_case )["last_hidden_state"]
# select random slice
_lowerCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCAmelCase : int = output_from_no_past[:, -1, random_slice_idx].detach()
_lowerCAmelCase : List[Any] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-3 ) )
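# Why this equality holds: position -1 of the full-sequence pass corresponds to
# position 0 of the cached pass, because the cached pass is fed only the newly
# appended token and reuses past_key_values for everything before it.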
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , ):
_lowerCAmelCase : Optional[int] = UMTaModel(config=_snake_case ).to(_snake_case ).half().eval()
_lowerCAmelCase : int = model(**_snake_case )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(_snake_case ).any().item() )
@require_torch
class __A ( snake_case__ ,snake_case__ ,snake_case__ ,unittest.TestCase ):
'''simple docstring'''
a_ = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
a_ = (UMTaForConditionalGeneration,) if is_torch_available() else ()
a_ = (
{
'''conversational''': UMTaForConditionalGeneration,
'''feature-extraction''': UMTaModel,
'''summarization''': UMTaForConditionalGeneration,
'''text2text-generation''': UMTaForConditionalGeneration,
'''translation''': UMTaForConditionalGeneration,
'''question-answering''': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
a_ = True
a_ = False
a_ = False
a_ = True
a_ = True
# The small UMT5 model needs higher percentages for CPU/MP tests
a_ = [0.8, 0.9]
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Tuple = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
_lowerCAmelCase : str = UMTaModel(config_and_inputs[0] ).to(_snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
_snake_case , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"""{tmpdirname}/t5_test.onnx""" , export_params=_snake_case , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
@unittest.skipIf(torch_device == "cpu" , "Can't do half precision" )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*_snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Union[str, Any] = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
_lowerCAmelCase : Tuple = config_and_inputs[0]
_lowerCAmelCase : List[str] = UMTaForConditionalGeneration(_snake_case ).eval()
model.to(_snake_case )
_lowerCAmelCase : Any = {
"head_mask": torch.zeros(config.num_layers , config.num_heads , device=_snake_case ),
"decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=_snake_case ),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=_snake_case ),
}
for attn_name, (name, mask) in zip(attention_names , head_masking.items() ):
_lowerCAmelCase : Dict = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_lowerCAmelCase : Any = torch.ones(
config.num_decoder_layers , config.num_heads , device=_snake_case )
_lowerCAmelCase : str = model.generate(
config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=_snake_case , return_dict_in_generate=_snake_case , **_snake_case , )
# We check the state of decoder_attentions and cross_attentions just from the last step
_lowerCAmelCase : List[Any] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
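# All heads in the checked layer are zero-masked, so every attention weight in
# the inspected tensors is exactly 0 and the summed weights compare equal to 0.0.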
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : int = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=_snake_case ).to(_snake_case )
_lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=_snake_case , legacy=_snake_case )
_lowerCAmelCase : Tuple = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
_lowerCAmelCase : Tuple = tokenizer(_snake_case , return_tensors="pt" , padding=_snake_case ).input_ids
# fmt: off
_lowerCAmelCase : List[Any] = torch.tensor(
[
[ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(_snake_case , _snake_case )
_lowerCAmelCase : Tuple = model.generate(input_ids.to(_snake_case ) )
_lowerCAmelCase : Dict = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
_lowerCAmelCase : List[str] = tokenizer.batch_decode(_snake_case )
self.assertEqual(_snake_case , _snake_case )
| 424
| 0
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __lowercase ( A, unittest.TestCase ):
'''simple docstring'''
_A : Optional[Any] = DebertaTokenizer
_A : List[Any] = True
_A : Any = DebertaTokenizerFast
def A_ ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase__ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
UpperCamelCase__ = dict(zip(_a , range(len(_a ) ) ) )
UpperCamelCase__ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
UpperCamelCase__ = {'''unk_token''': '''[UNK]'''}
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_a ) )
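# (Editorial aside: each line of the merges file is one BPE merge rule, applied
# in priority order; '\u0120' is the byte-level marker that GPT-2-style
# tokenizers use to represent a leading space.)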
def A_ ( self : Union[str, Any] , **_a : str ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_a )
def A_ ( self : List[Any] , _a : Tuple ):
UpperCamelCase__ = '''lower newer'''
UpperCamelCase__ = '''lower newer'''
return input_text, output_text
def A_ ( self : List[str] ):
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = '''lower newer'''
UpperCamelCase__ = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
UpperCamelCase__ = tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
UpperCamelCase__ = tokens + [tokenizer.unk_token]
UpperCamelCase__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
def A_ ( self : Dict ):
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = tokenizer('''Hello''' , '''World''' )
UpperCamelCase__ = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , _a )
@slow
def A_ ( self : int ):
UpperCamelCase__ = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
UpperCamelCase__ = tokenizer.encode('''sequence builders''' , add_special_tokens=_a )
UpperCamelCase__ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_a )
UpperCamelCase__ = tokenizer.encode(
'''sequence builders''' , add_special_tokens=_a , add_prefix_space=_a )
UpperCamelCase__ = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=_a , add_prefix_space=_a )
UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(_a )
UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def A_ ( self : Union[str, Any] ):
UpperCamelCase__ = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
UpperCamelCase__ = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
UpperCamelCase__ = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
UpperCamelCase__ = tokenizer(_a , padding=_a )
UpperCamelCase__ = [tokenizer.decode(_a , skip_special_tokens=_a ) for seq in encoding['''input_ids''']]
# fmt: off
UpperCamelCase__ = {
'''input_ids''': [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
UpperCamelCase__ = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , _a )
for expected, decoded in zip(_a , _a ):
self.assertEqual(_a , _a )
| 591
|
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __lowercase ( A, A, A, unittest.TestCase ):
'''simple docstring'''
_A : List[Any] = StableDiffusionControlNetImgaImgPipeline
_A : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
_A : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_A : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} )
_A : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def A_ ( self : Optional[Any] ):
torch.manual_seed(0 )
UpperCamelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
UpperCamelCase__ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
UpperCamelCase__ = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_a , set_alpha_to_one=_a , )
torch.manual_seed(0 )
UpperCamelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
UpperCamelCase__ = CLIPTextModel(_a )
UpperCamelCase__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCamelCase__ = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def A_ ( self : Optional[Any] , _a : Optional[int] , _a : Optional[Any]=0 ):
if str(_a ).startswith('''mps''' ):
UpperCamelCase__ = torch.manual_seed(_a )
else:
UpperCamelCase__ = torch.Generator(device=_a ).manual_seed(_a )
UpperCamelCase__ = 2
UpperCamelCase__ = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_a , device=torch.device(_a ) , )
UpperCamelCase__ = floats_tensor(control_image.shape , rng=random.Random(_a ) ).to(_a )
UpperCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase__ = Image.fromarray(np.uint8(image ) ).convert('''RGB''' ).resize((64, 64) )
UpperCamelCase__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def A_ ( self : Union[str, Any] ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def A_ ( self : Any ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def A_ ( self : Any ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class __lowercase ( A, A, unittest.TestCase ):
'''simple docstring'''
_A : int = StableDiffusionControlNetImgaImgPipeline
_A : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
_A : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_A : Optional[Any] = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def A_ ( self : Tuple ):
torch.manual_seed(0 )
UpperCamelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
def init_weights(m : torch.nn.Module ):
if isinstance(m , torch.nn.Conv2d ):
torch.nn.init.normal_(m.weight )  # in-place normal init ("normal" without the underscore is deprecated)
m.bias.data.fill_(1.0 )
UpperCamelCase__ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(_a )
torch.manual_seed(0 )
UpperCamelCase__ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(_a )
torch.manual_seed(0 )
UpperCamelCase__ = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_a , set_alpha_to_one=_a , )
torch.manual_seed(0 )
UpperCamelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
UpperCamelCase__ = CLIPTextModel(_a )
UpperCamelCase__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCamelCase__ = MultiControlNetModel([controlneta, controlneta] )
UpperCamelCase__ = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def A_ ( self : Tuple , _a : Dict , _a : Optional[int]=0 ):
if str(_a ).startswith('''mps''' ):
UpperCamelCase__ = torch.manual_seed(_a )
else:
UpperCamelCase__ = torch.Generator(device=_a ).manual_seed(_a )
UpperCamelCase__ = 2
UpperCamelCase__ = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_a , device=torch.device(_a ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_a , device=torch.device(_a ) , ),
]
UpperCamelCase__ = floats_tensor(control_image[0].shape , rng=random.Random(_a ) ).to(_a )
UpperCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase__ = Image.fromarray(np.uint8(image ) ).convert('''RGB''' ).resize((64, 64) )
UpperCamelCase__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def A_ ( self : str ):
UpperCamelCase__ = self.get_dummy_components()
UpperCamelCase__ = self.pipeline_class(**_a )
pipe.to(_a )
UpperCamelCase__ = 10.0
UpperCamelCase__ = 4
UpperCamelCase__ = self.get_dummy_inputs(_a )
UpperCamelCase__ = steps
UpperCamelCase__ = scale
UpperCamelCase__ = pipe(**_a )[0]
UpperCamelCase__ = self.get_dummy_inputs(_a )
UpperCamelCase__ = steps
UpperCamelCase__ = scale
UpperCamelCase__ = pipe(**_a , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
UpperCamelCase__ = self.get_dummy_inputs(_a )
UpperCamelCase__ = steps
UpperCamelCase__ = scale
UpperCamelCase__ = pipe(**_a , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
UpperCamelCase__ = self.get_dummy_inputs(_a )
UpperCamelCase__ = steps
UpperCamelCase__ = scale
UpperCamelCase__ = pipe(**_a , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def A_ ( self : int ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def A_ ( self : int ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def A_ ( self : int ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def A_ ( self : Dict ):
UpperCamelCase__ = self.get_dummy_components()
UpperCamelCase__ = self.pipeline_class(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(_a )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self : Optional[int] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : Dict ):
UpperCamelCase__ = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' )
UpperCamelCase__ = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , safety_checker=_a , controlnet=_a )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_a )
UpperCamelCase__ = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCamelCase__ = '''evil space-punk bird'''
UpperCamelCase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512) )
UpperCamelCase__ = load_image(
'''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512) )
UpperCamelCase__ = pipe(
_a , _a , control_image=_a , generator=_a , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
UpperCamelCase__ = output.images[0]
assert image.shape == (512, 512, 3)
UpperCamelCase__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
assert np.abs(expected_image - image ).max() < 9E-2
| 591
| 1
|
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase_ = 16
lowerCAmelCase_ = 32
def bamb(x: int) -> int:
# Convert a byte count to whole megabytes (2**20 bytes per MiB); the callers below use `bamb`.
return int(x / 2**20 )
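# Quick illustrative sanity check (not in the original script):
# bamb(8 * 2**20) == 8  # 8 MiB worth of bytes reports as 8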
class TorchTracemalloc:
'''Context manager that tracks CUDA memory allocated while its block runs.'''
def __enter__( self ):
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
self.begin = torch.cuda.memory_allocated()
return self
def __exit__( self , *exc ):
gc.collect()
torch.cuda.empty_cache()
self.end = torch.cuda.memory_allocated()
self.peak = torch.cuda.max_memory_allocated()
self.used = bamb(self.end - self.begin )
self.peaked = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator: Accelerator , batch_size: int = 16 , model_name_or_path: str = "bert-base-cased" , n_train: int = 3_20 , n_val: int = 1_60 , ):
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path )
datasets = load_dataset(
'glue' , 'mrpc' , split={'train': F"""train[:{n_train}]""", 'validation': F"""validation[:{n_val}]"""} )
def tokenize_function(examples ):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
tokenized_datasets = datasets.map(
tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=False )
# We also rename the 'label' column to 'labels', the name the transformers models expect
tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(examples ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(examples , padding='max_length' , max_length=1_28 , return_tensors='pt' )
return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
eval_dataloader = DataLoader(
tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
return train_dataloader, eval_dataloader
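# Editorial note: 'longest' padding produces batches of varying width, while
# 'max_length' padding fixes every batch at 128 tokens; the fixed shape is what
# XLA/TPU compilation prefers, since each new shape triggers a recompilation.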
def training_function(config , args ):
# Initialize accelerator
accelerator = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config['lr']
num_epochs = int(config['num_epochs'] )
seed = int(config['seed'] )
batch_size = int(config['batch_size'] )
model_name_or_path = args.model_name_or_path
set_seed(seed )
train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size , model_name_or_path , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also controls new weight initialization)
model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path , return_dict=True )
# Instantiate optimizer
optimizer_cls = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
optimizer = optimizer_cls(params=model.parameters() , lr=lr )
if accelerator.state.deepspeed_plugin is not None:
gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
gradient_accumulation_steps = 1
max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
else:
lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
# We need to keep track of how many total steps we have iterated over
overall_step = 0
# We also need to keep track of the starting epoch so files are named properly
starting_epoch = 0
# Now we train the model
train_total_peak_memory = {}
for epoch in range(starting_epoch , num_epochs ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(train_dataloader ):
outputs = model(**batch )
loss = outputs.loss
loss = loss / gradient_accumulation_steps
accelerator.backward(loss )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) )
accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
accelerator.print(
'Total Peak Memory consumed during the train (max): {}'.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
train_total_peak_memory[F"""epoch-{epoch}"""] = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
json.dump(train_total_peak_memory , f )
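# Editorial note: dividing the loss by gradient_accumulation_steps keeps the
# accumulated gradient equal in scale to one large-batch step, e.g. with 4
# accumulation steps each micro-batch contributes loss/4 before optimizer.step().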
def main():
parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=str , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=False , )
parser.add_argument(
'--output_dir' , type=str , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--peak_memory_upper_bound' , type=float , default=None , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
parser.add_argument(
'--n_train' , type=int , default=3_20 , help='Number of training examples to use.' , )
parser.add_argument(
'--n_val' , type=int , default=1_60 , help='Number of validation examples to use.' , )
parser.add_argument(
'--num_epochs' , type=int , default=1 , help='Number of train epochs.' , )
args = parser.parse_args()
config = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(config , args )
if __name__ == "__main__":
main()
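# Example invocation (assumed from the argparse flags above; the script name is
# hypothetical):
#   accelerate launch peak_memory_tracking.py --model_name_or_path bert-base-cased --num_epochs 1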
| 411
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
lowerCamelCase : List[str] = RoCBertTokenizer
lowerCamelCase : str = None
lowerCamelCase : Dict = False
lowerCamelCase : Dict = True
lowerCamelCase : Any = filter_non_english
def __UpperCAmelCase ( self : Optional[int] ) -> str:
super().setUp()
lowerCAmelCase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
lowerCAmelCase = {}
lowerCAmelCase = {}
for i, value in enumerate(UpperCAmelCase__ ):
lowerCAmelCase = i
lowerCAmelCase = i
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__ )
with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[Any] ) -> int:
lowerCAmelCase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCAmelCase = tokenizer.tokenize('你好[SEP]你是谁' )
self.assertListEqual(UpperCAmelCase__ , ['你', '好', '[SEP]', '你', '是', '谁'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase__ ) , [5, 6, 2, 5, 7, 8] )
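# The three id sequences coincide here because setUp built the shape and
# pronunciation maps from the same enumeration as the token vocabulary; with
# real RoCBert vocab files they would generally differ.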
def __UpperCAmelCase ( self : int ) -> str:
lowerCAmelCase = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __UpperCAmelCase ( self : Tuple ) -> Tuple:
lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __UpperCAmelCase ( self : Any ) -> Dict:
lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __UpperCAmelCase ( self : Tuple ) -> Dict:
lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __UpperCAmelCase ( self : Tuple ) -> Tuple:
lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def __UpperCAmelCase ( self : str ) -> Optional[int]:
lowerCAmelCase = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
lowerCAmelCase = {}
for i, token in enumerate(UpperCAmelCase__ ):
lowerCAmelCase = i
lowerCAmelCase = RoCBertWordpieceTokenizer(vocab=UpperCAmelCase__ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def __UpperCAmelCase ( self : Tuple ) -> Tuple:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def __UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def __UpperCAmelCase ( self : Dict ) -> int:
lowerCAmelCase = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
lowerCAmelCase = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
lowerCAmelCase = tokenizer_r.encode_plus(
UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , )
lowerCAmelCase = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase__ , 'do_lower_case' ) else False
lowerCAmelCase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'Allen'),
((2_1, 2_3), '##NL'),
((2_3, 2_4), '##P'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'allen'),
((2_1, 2_3), '##nl'),
((2_3, 2_4), '##p'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
lowerCAmelCase = ['的', '人', '有']
lowerCAmelCase = ''.join(UpperCAmelCase__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase = True
lowerCAmelCase = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = tokenizer_p.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = tokenizer_r.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase__ )
lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = False
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = tokenizer_r.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = tokenizer_p.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase__ )
lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase__ )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCAmelCase = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(UpperCAmelCase__ )
]
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
lowerCAmelCase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCAmelCase = tokenizer.encode('你好' , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = tokenizer.encode('你是谁' , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
lowerCAmelCase = self.get_tokenizers(do_lower_case=UpperCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowerCAmelCase = '你好,你是谁'
lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase__ )
lowerCAmelCase = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
lowerCAmelCase = tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase__ )
lowerCAmelCase = tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase__ )
lowerCAmelCase = tokenizer.prepare_for_model(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = tokenizer.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
| 133
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase : Optional[int] = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[int] = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Any = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : int = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[int] = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[Any] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
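# Editorial note: the _LazyModule pattern above defers the heavy framework
# imports until an attribute is first accessed, so importing this package stays
# cheap; the TYPE_CHECKING branch only exists so static analyzers and IDEs can
# resolve the real symbols.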
| 714
|
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
_UpperCAmelCase : int = Mapping[str, np.ndarray]
_UpperCAmelCase : List[Any] = Mapping[str, Any] # Is a nested dict.
_UpperCAmelCase : Dict = 0.01
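# ProteinNet stores tertiary coordinates in picometers; multiplying by this
# factor converts them to angstroms (1 angstrom = 100 pm).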
@dataclasses.dataclass(frozen=__UpperCamelCase )
class lowerCAmelCase :
UpperCAmelCase__ = 42 # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
UpperCAmelCase__ = 42 # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
UpperCAmelCase__ = 42 # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
UpperCAmelCase__ = 42 # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
UpperCAmelCase__ = 42 # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
UpperCAmelCase__ = None
# Optional remark about the protein. Included as a comment in output PDB
# files
UpperCAmelCase__ = None
# Templates used to generate this protein (prediction-only)
UpperCAmelCase__ = None
# Chain corresponding to each parent
UpperCAmelCase__ = None
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Protein:
lowerCamelCase__ : Optional[int] = r'(\[[A-Z]+\]\n)'
lowerCamelCase__ : List[str] = [tag.strip() for tag in re.split(_UpperCAmelCase , _UpperCAmelCase ) if len(_UpperCAmelCase ) > 0]
lowerCamelCase__ : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split('\n' ) for l in tags[1::2]] )
lowerCamelCase__ : List[str] = ["N", "CA", "C"]
lowerCamelCase__ : Dict = None
lowerCamelCase__ : str = None
lowerCamelCase__ : int = None
for g in groups:
if "[PRIMARY]" == g[0]:
lowerCamelCase__ : int = g[1][0].strip()
for i in range(len(_UpperCAmelCase ) ):
if seq[i] not in residue_constants.restypes:
lowerCamelCase__ : Union[str, Any] = 'X' # FIXME: strings are immutable
lowerCamelCase__ : Union[str, Any] = np.array(
[residue_constants.restype_order.get(_UpperCAmelCase , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
lowerCamelCase__ : List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(_UpperCAmelCase , g[1][axis].split() ) ) )
lowerCamelCase__ : int = np.array(_UpperCAmelCase )
lowerCamelCase__ : Tuple = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(_UpperCAmelCase ):
lowerCamelCase__ : int = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
lowerCamelCase__ : int = np.array(list(map({'-': 0, '+': 1}.get , g[1][0].strip() ) ) )
lowerCamelCase__ : List[Any] = np.zeros(
(
len(_UpperCAmelCase ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(_UpperCAmelCase ):
lowerCamelCase__ : Union[str, Any] = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=_UpperCAmelCase , atom_mask=_UpperCAmelCase , aatype=_UpperCAmelCase , residue_index=np.arange(len(_UpperCAmelCase ) ) , b_factors=_UpperCAmelCase , )
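# Hedged usage sketch (section names follow the ProteinNet text format; the
# concrete string below is illustrative only):
#   prot = <parser above>("[PRIMARY]\nMK...\n[TERTIARY]\n...\n[MASK]\n++--\n")
#   prot.atom_positions  # -> (num_res, atom_type_num, 3) array in angstroms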
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase = 0 ) -> List[str]:
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : Dict = prot.remark
if remark is not None:
pdb_headers.append(F"""REMARK {remark}""" )
lowerCamelCase__ : str = prot.parents
lowerCamelCase__ : Union[str, Any] = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
lowerCamelCase__ : Any = [p for i, p in zip(_UpperCAmelCase , _UpperCAmelCase ) if i == chain_id]
if parents is None or len(_UpperCAmelCase ) == 0:
lowerCamelCase__ : List[Any] = ['N/A']
pdb_headers.append(F"""PARENT {" ".join(_UpperCAmelCase )}""" )
return pdb_headers
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> str:
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : str = pdb_str.split('\n' )
lowerCamelCase__ : int = prot.remark
if remark is not None:
out_pdb_lines.append(F"""REMARK {remark}""" )
lowerCamelCase__ : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
lowerCamelCase__ : List[Any] = []
if prot.parents_chain_index is not None:
lowerCamelCase__ : Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(_UpperCAmelCase ) , [] )
parent_dict[str(_UpperCAmelCase )].append(_UpperCAmelCase )
lowerCamelCase__ : str = max([int(chain_idx ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
lowerCamelCase__ : Optional[Any] = parent_dict.get(str(_UpperCAmelCase ) , ['N/A'] )
parents_per_chain.append(_UpperCAmelCase )
else:
parents_per_chain.append(list(prot.parents ) )
else:
lowerCamelCase__ : Union[str, Any] = [['N/A']]
def make_parent_line(_UpperCAmelCase ) -> str:
return F"""PARENT {" ".join(_UpperCAmelCase )}"""
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
lowerCamelCase__ : List[Any] = 0
for i, l in enumerate(_UpperCAmelCase ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(_UpperCAmelCase )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(_UpperCAmelCase ):
lowerCamelCase__ : Union[str, Any] = parents_per_chain[chain_counter]
else:
lowerCamelCase__ : Optional[Any] = ['N/A']
out_pdb_lines.append(make_parent_line(_UpperCAmelCase ) )
return "\n".join(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> str:
lowerCamelCase__ : Tuple = residue_constants.restypes + ['X']
def res_atoa(_UpperCAmelCase ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , 'UNK' )
lowerCamelCase__ : int = residue_constants.atom_types
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : Union[str, Any] = prot.atom_mask
lowerCamelCase__ : Union[str, Any] = prot.aatype
lowerCamelCase__ : int = prot.atom_positions
lowerCamelCase__ : List[Any] = prot.residue_index.astype(np.intaa )
lowerCamelCase__ : Optional[int] = prot.b_factors
lowerCamelCase__ : Any = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError('Invalid aatypes.' )
lowerCamelCase__ : List[Any] = get_pdb_headers(_UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
pdb_lines.extend(_UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = aatype.shape[0]
lowerCamelCase__ : Optional[Any] = 1
lowerCamelCase__ : str = 0
lowerCamelCase__ : Tuple = string.ascii_uppercase
lowerCamelCase__ : str = None
# Add all atom sites.
for i in range(_UpperCAmelCase ):
lowerCamelCase__ : List[Any] = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(_UpperCAmelCase , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
lowerCamelCase__ : Union[str, Any] = 'ATOM'
lowerCamelCase__ : Optional[int] = atom_name if len(atom_name ) == 4 else F""" {atom_name}"""
lowerCamelCase__ : Any = ''
lowerCamelCase__ : Optional[Any] = ''
lowerCamelCase__ : str = 1.00
lowerCamelCase__ : str = atom_name[0] # Protein supports only C, N, O, S, this works.
lowerCamelCase__ : List[str] = ''
lowerCamelCase__ : str = 'A'
if chain_index is not None:
lowerCamelCase__ : List[Any] = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
lowerCamelCase__ : Union[str, Any] = (
F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
F"""{res_name_a:>3} {chain_tag:>1}"""
F"""{residue_index[i]:>4}{insertion_code:>1} """
F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
F"""{occupancy:>6.2f}{b_factor:>6.2f} """
F"""{element:>2}{charge:>2}"""
)
pdb_lines.append(_UpperCAmelCase )
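# Illustrative ATOM record produced by the f-string above (PDB columnar layout):
# "ATOM      1  N   MET A   1      11.104   6.134  -6.504  1.00  0.00           N"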
atom_index += 1
lowerCamelCase__ : Dict = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : List[str] = chain_index[i + 1]
if should_terminate:
# Close the chain.
lowerCamelCase__ : int = 'TER'
lowerCamelCase__ : Any = (
F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(_UpperCAmelCase )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(_UpperCAmelCase , _UpperCAmelCase ) )
pdb_lines.append('END' )
pdb_lines.append('' )
return "\n".join(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> np.ndarray:
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , ) -> Protein:
return Protein(
aatype=features['aatype'] , atom_positions=result['final_atom_positions'] , atom_mask=result['final_atom_mask'] , residue_index=features['residue_index'] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask'] ) , chain_index=_UpperCAmelCase , remark=_UpperCAmelCase , parents=_UpperCAmelCase , parents_chain_index=_UpperCAmelCase , )
| 188
| 0
|