import inspect
import unittest

from transformers import ViTConfig
from transformers.testing_utils import (
    require_accelerate,
    require_torch,
    require_torch_gpu,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
    from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        r"""A small test to make sure that inference works in half precision without any problem."""
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
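
# Sanity check for the (1, 3601, 384) shape asserted in
# test_inference_interpolate_pos_encoding: with the processor resizing to 480
# and dino-vits8's 8x8 patches, the sequence length is the number of patches
# plus one [CLS] token. A minimal sketch of that arithmetic, with values taken
# from the test above:
image_size = 480
patch_size = 8
num_patches = (image_size // patch_size) ** 2  # 60 * 60 = 3600
assert num_patches + 1 == 3601  # +1 for the [CLS] token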

# ----------------------------------------------------------------------------

"""simple docstring"""
from math import isqrt, loga
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> list[int]:
'''simple docstring'''
lowercase_ = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , __lowerCAmelCase , __lowerCAmelCase ):
lowercase_ = False
return [i for i in range(2 , __lowerCAmelCase ) if is_prime[i]]
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = 80_08_00 , __lowerCAmelCase = 80_08_00 ) -> int:
'''simple docstring'''
lowercase_ = degree * loga(__lowerCAmelCase )
lowercase_ = int(__lowerCAmelCase )
lowercase_ = calculate_prime_numbers(__lowerCAmelCase )
lowercase_ = 0
lowercase_ = 0
lowercase_ = len(__lowerCAmelCase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F"{solution() = }")

# ----------------------------------------------------------------------------

"""Skip list: a probabilistic sorted dictionary with expected O(log n) operations."""
from __future__ import annotations

from random import random
from typing import Generic, TypeVar

KT = TypeVar("KT")
VT = TypeVar("VT")


class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of forward references of this node."""
        return len(self.forward)


class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Return a random level from [1, self.max_level]; higher values are less likely."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        """
        :return: the searched node (or None if the key is absent) and the list of
                 nodes that refer (or should refer) to it.
        """
        # Nodes that refer or should refer to the output node.
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to a node with key value higher
            #                             or equal to the searched key would result
            #                             in skipping the searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to the searched node) will potentially
            # have to be updated.
            update_vector.append(node)
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If the current node doesn't contain any further
        #                          references then the searched key is not present.
        # node.forward[0].key == key - The next node's key should equal the searched
        #                              key if the key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to the removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After a level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level
            new_node = Node(key, value)
            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through the new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])
                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: KT) -> VT | None:
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None


def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_found_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of the skip list:
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_found_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()


def main():
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")
    skip_list.delete(4)
    print(skip_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
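
# Minimal usage sketch of SkipList with hypothetical keys. The expected average
# cost of insert/find/delete is O(log n), governed by the coin-flip
# probability p passed to the constructor:
scores = SkipList()
scores.insert("alice", 41)
scores.insert("bob", 12)
scores.insert("alice", 43)  # inserting an existing key overwrites its value
assert scores.find("alice") == 43
scores.delete("bob")
assert scores.find("bob") is None
assert list(scores) == ["alice"]  # iteration yields keys in sorted order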

# ----------------------------------------------------------------------------

"""
Count the primes below one million that are a difference of two consecutive
cubes, i.e. primes of the form (n + 1)**3 - n**3.
"""
from math import isqrt


def is_prime(number: int) -> bool:
    """Return True if number is prime (trial division up to its square root)."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Return how many cube-difference candidates below max_prime are prime."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # 2**3 - 1**3, the first candidate
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index  # advance to the next cube difference
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
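
# The candidate update in solution() relies on the difference of consecutive
# cubes: (n + 1)**3 - n**3 == 3*n*n + 3*n + 1 (7, 19, 37, ... starting at n=1),
# and that difference itself grows by 6*(n + 1) from one n to the next, which
# is exactly what `prime_candidate += 6 * cube_index` applies. A small check:
for n in range(1, 100):
    assert (n + 1) ** 3 - n**3 == 3 * n * n + 3 * n + 1
    assert (3 * (n + 1) ** 2 + 3 * (n + 1) + 1) - (3 * n * n + 3 * n + 1) == 6 * (n + 1)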

# ----------------------------------------------------------------------------

import inspect
import unittest
import warnings

from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
    require_accelerate,
    require_torch,
    require_torch_gpu,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_MAPPING,
        DeiTForImageClassification,
        DeiTForImageClassificationWithTeacher,
        DeiTForMaskedImageModeling,
        DeiTModel,
    )
    from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import DeiTImageProcessor


class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as DeiT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the PyTorch warning "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please
                    # ensure they have the same size.", which is a symptom that something is wrong for the
                    # regression problem. See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        r"""A small test to make sure that inference works in half precision without any problem."""
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
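
# For reference, the label layouts that test_problem_types above feeds to the
# classification heads, sketched for a hypothetical batch of size 2 (the
# shapes and dtypes are what matter; the zero values are arbitrary):
batch_size = 2
multi_labels = torch.zeros(batch_size, 2)                  # multi_label_classification: float, (batch, num_labels)
single_labels = torch.zeros(batch_size, dtype=torch.long)  # single_label_classification: long, (batch,)
regression_labels = torch.zeros(batch_size)                # regression: float, (batch,)
assert multi_labels.dtype == regression_labels.dtype == torch.float32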

# ----------------------------------------------------------------------------

import unittest

from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")


@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)

    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
            "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
            "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
            "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
            "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
            "conditioning on both left and right context in all layers.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        # fmt: off
        expected_encoding = {
'input_ids': [
[4, 3_2, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 6_4, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_5, 2_2, 4, 2_8, 9, 8, 2_0, 9, 4, 7, 1_2, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 6, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 7, 9, 1_4, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 3_9, 2_5, 5, 1_3, 6, 6_3, 4, 2_4, 1_3, 8, 2_7, 1_0, 1_4, 5, 1_2, 4, 2_1, 5, 9, 5, 1_3, 7, 1_5, 3_9, 2_4, 1_6, 1_3, 2_4, 8, 1_2, 5, 4, 7, 1_3, 1_7, 1_1, 1_0, 6, 5, 1_7, 6, 1_6, 1_3, 5, 1_2, 4, 6_4, 4_0, 4_7, 5_4, 3_2, 2_3, 4, 5_3, 4_9, 3_2, 2_3, 4, 5_4, 8, 4_0, 4_7, 5_4, 3_2, 7, 2_3, 4, 6_9, 5_2, 4_3, 2_3, 4, 5_1, 1_0, 1_2, 6, 1_0, 1_5, 4_0, 5, 1_3, 6, 2_3, 4, 6_9, 5_2, 4_8, 5, 6, 2_6, 2_6, 2_6, 6_3, 4, 1_9, 8, 1_3, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 6_1, 9, 1_4, 5, 1_3, 1_2, 6, 7, 9, 1_4, 1_0, 9, 2_1, 4, 6_4, 4_8, 5_2, 6_1, 6_3, 4, 7, 9, 1_4, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 5_3, 5, 9, 5, 1_3, 7, 6, 1_0, 8, 9, 4, 6_4, 4_8, 5_2, 5_3, 6_3, 4, 2_0, 1_0, 6, 1_1, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 1_0, 1_3, 6, 2_2, 3_9, 6, 2_0, 8, 4, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 4, 1_8, 8, 1_4, 5, 1_5, 1_2, 4, 1_0, 9, 4, 8, 9, 5, 4, 1_1, 1_6, 9, 1_4, 1_3, 5, 1_4, 4, 2_4, 1_5, 1_6, 1_2, 4, 1_5, 7, 9, 2_1, 1_6, 7, 2_1, 5, 1_2, 4, 7, 9, 1_4, 4, 1_4, 5, 5, 2_4, 4, 1_0, 9, 6, 5, 1_3, 8, 2_4, 5, 1_3, 7, 2_5, 1_0, 1_5, 1_0, 6, 2_2, 4, 2_5, 5, 6, 2_0, 5, 5, 9, 4, 5_8, 7, 3_7, 2_3, 4, 4_9, 2_2, 3_2, 8, 1_3, 1_7, 1_1, 4, 7, 9, 1_4, 4, 3_2, 5, 9, 1_2, 8, 1_3, 5_5, 1_5, 8, 2_0, 2_6, 2],
[4, 4_0, 4_7, 5_4, 3_2, 4, 1_0, 1_2, 4, 1_4, 5, 1_2, 1_0, 2_1, 9, 5, 1_4, 4, 6, 8, 4, 2_4, 1_3, 5, 3_9, 6, 1_3, 7, 1_0, 9, 4, 1_4, 5, 5, 2_4, 4, 2_5, 1_0, 1_4, 1_0, 1_3, 5, 1_7, 6, 1_0, 8, 9, 7, 1_5, 4, 1_3, 5, 2_4, 1_3, 5, 1_2, 5, 9, 6, 7, 6, 1_0, 8, 9, 1_2, 4, 1_9, 1_3, 8, 1_8, 4, 1_6, 9, 1_5, 7, 2_5, 5, 1_5, 5, 1_4, 4, 6, 5, 3_7, 6, 4, 2_5, 2_2, 4, 4_6, 8, 1_0, 9, 6, 1_5, 2_2, 4, 1_7, 8, 9, 1_4, 1_0, 6, 1_0, 8, 9, 1_0, 9, 2_1, 4, 8, 9, 4, 2_5, 8, 6, 1_1, 4, 1_5, 5, 1_9, 6, 4, 7, 9, 1_4, 4, 1_3, 1_0, 2_1, 1_1, 6, 4, 1_7, 8, 9, 6, 5, 3_7, 6, 4, 1_0, 9, 4, 7, 1_5, 1_5, 4, 1_5, 7, 2_2, 5, 1_3, 1_2, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 3_2, 1_1, 5, 4, 4_5, 1_6, 1_0, 1_7, 2_8, 4, 2_5, 1_3, 8, 2_0, 9, 4, 1_9, 8, 3_7, 4, 4_6, 1_6, 1_8, 2_4, 1_2, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 5, 4, 1_5, 7, 5_7, 2_2, 4, 1_4, 8, 2_1, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
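
# Outside the test harness, the same tokenizer class is normally loaded from
# the Hub checkpoint referenced in test_tokenizer_integration. A minimal
# round-trip sketch (assumes network access to download the checkpoint):
from transformers import SpeechT5Tokenizer as _SpeechT5Tokenizer

_tokenizer = _SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
_ids = _tokenizer("this is a test").input_ids
_text = _tokenizer.decode(_ids, skip_special_tokens=True)  # expected to recover the input text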

# ----------------------------------------------------------------------------

"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase =logging.get_logger(__name__)
UpperCAmelCase ={
"google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
"google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class lowerCamelCase__ ( __snake_case ):
'''simple docstring'''
_lowerCamelCase = 'mobilenet_v1'
def __init__( self ,lowerCamelCase_=3 ,lowerCamelCase_=2_2_4 ,lowerCamelCase_=1.0 ,lowerCamelCase_=8 ,lowerCamelCase_="relu6" ,lowerCamelCase_=True ,lowerCamelCase_=0.9_99 ,lowerCamelCase_=0.02 ,lowerCamelCase_=0.0_01 ,**lowerCamelCase_ ,) -> List[str]:
super().__init__(**lowerCamelCase_ )
if depth_multiplier <= 0:
raise ValueError("""depth_multiplier must be greater than zero.""" )
A = num_channels
A = image_size
A = depth_multiplier
A = min_depth
A = hidden_act
A = tf_padding
A = classifier_dropout_prob
A = initializer_range
A = layer_norm_eps
class lowerCamelCase__ ( __snake_case ):
'''simple docstring'''
_lowerCamelCase = version.parse('''1.11''' )
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([("""pixel_values""", {0: """batch"""})] )
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})] )
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
@property
def UpperCamelCase__ ( self ) -> float:
return 1E-4
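
# Minimal usage sketch of the configuration above, via the public transformers
# export (argument values are illustrative, not a recommended setting):
from transformers import MobileNetV1Config as _MobileNetV1Config

_config = _MobileNetV1Config(depth_multiplier=0.75, image_size=192)
assert _config.hidden_act == "relu6"  # default activation
try:
    _MobileNetV1Config(depth_multiplier=0.0)  # rejected by the validation in __init__
except ValueError as err:
    print(err)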

# ----------------------------------------------------------------------------

"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase =logging.get_logger(__name__)
UpperCAmelCase ={
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowerCamelCase = '''distilbert'''
_lowerCamelCase = {
'''hidden_size''': '''dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
}
def __init__( self ,lowerCamelCase_=3_0_5_2_2 ,lowerCamelCase_=5_1_2 ,lowerCamelCase_=False ,lowerCamelCase_=6 ,lowerCamelCase_=1_2 ,lowerCamelCase_=7_6_8 ,lowerCamelCase_=4 * 7_6_8 ,lowerCamelCase_=0.1 ,lowerCamelCase_=0.1 ,lowerCamelCase_="gelu" ,lowerCamelCase_=0.02 ,lowerCamelCase_=0.1 ,lowerCamelCase_=0.2 ,lowerCamelCase_=0 ,**lowerCamelCase_ ,) -> Dict:
A = vocab_size
A = max_position_embeddings
A = sinusoidal_pos_embds
A = n_layers
A = n_heads
A = dim
A = hidden_dim
A = dropout
A = attention_dropout
A = activation
A = initializer_range
A = qa_dropout
A = seq_classif_dropout
super().__init__(**lowerCamelCase_ ,pad_token_id=lowerCamelCase_ )
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
A = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
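
# The attribute_map above lets BERT-style attribute names resolve to
# DistilBERT's native parameter names; a minimal sketch of that aliasing
# (standard PretrainedConfig behaviour), using the public transformers export:
from transformers import DistilBertConfig as _DistilBertConfig

_config = _DistilBertConfig(n_layers=6, n_heads=12, dim=768)
assert _config.num_hidden_layers == _config.n_layers == 6
assert _config.hidden_size == _config.dim == 768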

# ----------------------------------------------------------------------------

"""Iterative depth-first search (DFS) on a graph stored as adjacency lists."""
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    """Return the set of vertices reachable from `start`.

    >>> sorted(depth_first_search(G, "A"))
    ['A', 'B', 'C', 'D', 'E', 'F', 'G']
    """
    explored, stack = {start}, [start]

    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
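
# For contrast with the explicit stack above, an equivalent recursive
# formulation (a sketch; Python's recursion limit makes the iterative version
# the safer choice on deep graphs):
def depth_first_search_recursive(graph: dict, v: str, explored: set[str] | None = None) -> set[str]:
    """Recursive DFS over the same adjacency-list format used above."""
    if explored is None:
        explored = set()
    explored.add(v)
    for adj in graph[v]:
        if adj not in explored:
            depth_first_search_recursive(graph, adj, explored)
    return explored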

# ----------------------------------------------------------------------------

'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : Optional[int] = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='deta'
__a ={
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
    def __init__(self, backbone_config=None, num_queries=900, max_position_embeddings=2048, encoder_layers=6,
                 encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024,
                 decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu",
                 d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02,
                 init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine",
                 num_feature_levels=5, encoder_n_points=4, decoder_n_points=4, two_stage=True,
                 two_stage_num_proposals=300, with_box_refine=True, assign_first_stage=True, class_cost=1, bbox_cost=5,
                 giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5,
                 giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, **kwargs):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
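
# Hypothetical usage sketch (not part of the original file): constructing the
# config with defaults falls back to a ResNet backbone, and `to_dict`
# round-trips the nested backbone config.
if __name__ == "__main__":
    cfg = DetaConfig()
    assert cfg.hidden_size == cfg.d_model == 256
    assert cfg.to_dict()["backbone_config"]["model_type"] == "resnet"
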
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(self, max_length, vocab_size, d_model, dropout_rate, num_layers, num_heads, d_kv, d_ff,
                 feed_forward_proj, is_decoder=False):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff,
            dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
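
# Minimal shape-check sketch (assumption: toy hyper-parameters chosen purely
# for illustration; this block is not part of the original module).
if __name__ == "__main__":
    enc = SpectrogramNotesEncoder(
        max_length=32, vocab_size=100, d_model=64, dropout_rate=0.1,
        num_layers=2, num_heads=4, d_kv=16, d_ff=128,
        feed_forward_proj="gated-gelu", is_decoder=False,
    )
    tokens = torch.randint(0, 100, (1, 32))
    mask = torch.ones(1, 32)
    hidden, out_mask = enc(tokens, mask)
    assert hidden.shape == (1, 32, 64) and out_mask.shape == (1, 32)
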
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class CpmTokenizer(PreTrainedTokenizer):
    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>",
                 eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>",
                 mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs=None, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents,
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def snake_case__ ( self):
return len(self.sp_model)
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = {self.convert_ids_to_tokens(_lowerCamelCase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self):
UpperCAmelCase__ : List[str] = self.__dict__.copy()
UpperCAmelCase__ : Optional[int] = None
return state
def __setstate__( self , _lowerCamelCase):
UpperCAmelCase__ : Optional[Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs"""):
UpperCAmelCase__ : Tuple = {}
UpperCAmelCase__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def snake_case__ ( self , _lowerCamelCase):
if self.remove_space:
UpperCAmelCase__ : Tuple = """ """.join(inputs.strip().split())
else:
UpperCAmelCase__ : str = inputs
UpperCAmelCase__ : List[Any] = outputs.replace("""``""" , """\"""").replace("""''""" , """\"""")
if not self.keep_accents:
UpperCAmelCase__ : str = unicodedata.normalize("""NFKD""" , _lowerCamelCase)
UpperCAmelCase__ : Any = """""".join([c for c in outputs if not unicodedata.combining(_lowerCamelCase)])
if self.do_lower_case:
UpperCAmelCase__ : Optional[int] = outputs.lower()
return outputs
def snake_case__ ( self , _lowerCamelCase):
UpperCAmelCase__ : List[str] = self.preprocess_text(_lowerCamelCase)
UpperCAmelCase__ : Dict = self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase)
UpperCAmelCase__ : List[Any] = []
for piece in pieces:
if len(_lowerCamelCase) > 1 and piece[-1] == str(""",""") and piece[-2].isdigit():
UpperCAmelCase__ : str = self.sp_model.EncodeAsPieces(piece[:-1].replace(_lowerCamelCase , """"""))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
UpperCAmelCase__ : Optional[int] = cur_pieces[1:]
else:
UpperCAmelCase__ : Optional[int] = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(_lowerCamelCase)
else:
new_pieces.append(_lowerCamelCase)
return new_pieces
def snake_case__ ( self , _lowerCamelCase):
return self.sp_model.PieceToId(_lowerCamelCase)
def snake_case__ ( self , _lowerCamelCase):
return self.sp_model.IdToPiece(_lowerCamelCase)
def snake_case__ ( self , _lowerCamelCase):
UpperCAmelCase__ : str = """""".join(_lowerCamelCase).replace(_lowerCamelCase , """ """).strip()
return out_string
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = None):
UpperCAmelCase__ : Optional[Any] = [self.sep_token_id]
UpperCAmelCase__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase)
if token_ids_a is not None:
return ([0] * len(_lowerCamelCase)) + [1] + ([0] * len(_lowerCamelCase)) + [1, 1]
return ([0] * len(_lowerCamelCase)) + [1, 1]
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = None):
UpperCAmelCase__ : int = [self.sep_token_id]
UpperCAmelCase__ : Tuple = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = None):
if not os.path.isdir(_lowerCamelCase):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
UpperCAmelCase__ : str = os.path.join(
_lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
if os.path.abspath(self.vocab_file) != os.path.abspath(_lowerCamelCase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , _lowerCamelCase)
elif not os.path.isfile(self.vocab_file):
with open(_lowerCamelCase , """wb""") as fi:
UpperCAmelCase__ : str = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase)
return (out_vocab_file,)
    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
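
# Hypothetical usage sketch (assumes `sentencepiece` and `jieba` are installed;
# the checkpoint id is the one referenced in the vocab map above).
# tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
# ids = tokenizer.encode("...")   # jieba pre-segmentation, then SentencePiece
# text = tokenizer.decode(ids)    # restores spaces/newlines from \u2582/\u2583
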
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_blip_2''': [
'''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Blip2Config''',
'''Blip2QFormerConfig''',
'''Blip2VisionConfig''',
],
'''processing_blip_2''': ['''Blip2Processor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
'''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Blip2Model''',
'''Blip2QFormerModel''',
'''Blip2PreTrainedModel''',
'''Blip2ForConditionalGeneration''',
'''Blip2VisionModel''',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
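
# Illustrative note (sketch, not part of the original file): with the lazy
# module in place, importing the package is cheap; torch-backed symbols are
# only materialized on first attribute access, e.g.:
#     from transformers.models.blip_2 import Blip2Config  # triggers real import
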
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """An undirected weighted graph."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge to the graph, registering both endpoints as vertices."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm to find the minimum spanning tree."""
        subgraph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight

            subgraph.add_edge(min_edge, min_weight)

        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    """Find the maximum saving achievable by removing redundant edges from the
    network while keeping it connected (Project Euler problem 107)."""
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)

    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]

    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total
if __name__ == "__main__":
print(F"{solution() = }")
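
# Minimal usage sketch with hypothetical data (not part of the original file):
# Prim's algorithm on a 3-vertex triangle keeps the two cheapest edges.
if __name__ == "__main__":
    demo = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
    mst = demo.prims_algorithm()
    assert sum(mst.edges.values()) == 3  # edges (0, 1) and (1, 2)
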
"""simple docstring"""
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}
# Exponent of the factor (meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """
    Convert a length between metric units.

    >>> length_conversion(4, "kilometer", "meter")
    4000.0
    >>> length_conversion(4000, "meter", "kilometer")
    4.0
    """
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
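
# Worked example sketch (not part of the original module): the conversion is a
# pure power-of-ten shift by the exponent difference between the two units.
if __name__ == "__main__":
    assert length_conversion(4, "kilometer", "meter") == 4 * 10**3  # km -> m: 3 - 0
    assert length_conversion(4000, "meter", "kilometer") == 4.0     # m -> km: 0 - 3
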
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Dict=1_3 , SCREAMING_SNAKE_CASE_ : List[Any]=7 , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : List[str]=9_9 , SCREAMING_SNAKE_CASE_ : int=1_6 , SCREAMING_SNAKE_CASE_ : List[str]=3_6 , SCREAMING_SNAKE_CASE_ : List[Any]=6 , SCREAMING_SNAKE_CASE_ : Tuple=6 , SCREAMING_SNAKE_CASE_ : List[Any]=6 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3_7 , SCREAMING_SNAKE_CASE_ : Tuple="gelu" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE_ : int=0.1 , SCREAMING_SNAKE_CASE_ : Optional[Any]=5_1_2 , SCREAMING_SNAKE_CASE_ : List[str]=1_6 , SCREAMING_SNAKE_CASE_ : List[str]=2 , SCREAMING_SNAKE_CASE_ : List[Any]=0.02 , SCREAMING_SNAKE_CASE_ : Dict=3 , SCREAMING_SNAKE_CASE_ : int=4 , SCREAMING_SNAKE_CASE_ : Tuple=None , ):
lowerCAmelCase_ : Any = parent
lowerCAmelCase_ : Optional[int] = batch_size
lowerCAmelCase_ : Dict = seq_length
lowerCAmelCase_ : Tuple = is_training
lowerCAmelCase_ : str = use_input_mask
lowerCAmelCase_ : Union[str, Any] = use_token_type_ids
lowerCAmelCase_ : Tuple = use_labels
lowerCAmelCase_ : Optional[int] = vocab_size
lowerCAmelCase_ : Any = embedding_size
lowerCAmelCase_ : Optional[Any] = hidden_size
lowerCAmelCase_ : str = num_hidden_layers
lowerCAmelCase_ : Optional[Any] = num_hidden_groups
lowerCAmelCase_ : Dict = num_attention_heads
lowerCAmelCase_ : Optional[Any] = intermediate_size
lowerCAmelCase_ : Any = hidden_act
lowerCAmelCase_ : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase_ : int = attention_probs_dropout_prob
lowerCAmelCase_ : int = max_position_embeddings
lowerCAmelCase_ : List[Any] = type_vocab_size
lowerCAmelCase_ : Any = type_sequence_label_size
lowerCAmelCase_ : Optional[int] = initializer_range
lowerCAmelCase_ : Tuple = num_labels
lowerCAmelCase_ : Dict = num_choices
lowerCAmelCase_ : Tuple = scope
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
lowerCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ : str = None
if self.use_input_mask:
lowerCAmelCase_ : int = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ : List[Any] = None
if self.use_token_type_ids:
lowerCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase_ : List[str] = None
lowerCAmelCase_ : Union[str, Any] = None
lowerCAmelCase_ : str = None
if self.use_labels:
lowerCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str ):
lowerCAmelCase_ : Union[str, Any] = AlbertModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : List[str] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = model(SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : int = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple ):
lowerCAmelCase_ : Optional[Any] = AlbertForPreTraining(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : Optional[Any] = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , sentence_order_label=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict ):
lowerCAmelCase_ : str = AlbertForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : str = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] ):
lowerCAmelCase_ : List[str] = AlbertForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : Any = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict ):
lowerCAmelCase_ : Union[str, Any] = self.num_labels
lowerCAmelCase_ : Union[str, Any] = AlbertForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ):
lowerCAmelCase_ : List[str] = self.num_labels
lowerCAmelCase_ : List[Any] = AlbertForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : str = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str] ):
lowerCAmelCase_ : Optional[Any] = self.num_choices
lowerCAmelCase_ : int = AlbertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ : List[Any] = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( lowercase_, lowercase_, unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE = (
{
"""feature-extraction""": AlbertModel,
"""fill-mask""": AlbertForMaskedLM,
"""question-answering""": AlbertForQuestionAnswering,
"""text-classification""": AlbertForSequenceClassification,
"""token-classification""": AlbertForTokenClassification,
"""zero-shot""": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowerCAmelCase_ : str = AlbertModelTester(self )
lowerCAmelCase_ : Optional[int] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : str ):
lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase_ : int = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : Optional[Any] = AlbertModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
lowerCAmelCase_ : Any = AlbertModel.from_pretrained('albert-base-v2' )
lowerCAmelCase_ : Tuple = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowerCAmelCase_ : Tuple = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCAmelCase_ : List[Any] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0]
lowerCAmelCase_ : str = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : int = torch.tensor(
[[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
lowerCamelCase :Optional[Any] = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self):
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset
def _a (self ):
import faiss
A_ : List[Any] = self._create_dummy_dataset()
A_ : int = dset.map(
lambda lowercase , lowercase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase )
A_ : Union[str, Any] = dset.add_faiss_index("""vecs""" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
A_, A_ : List[Any] = dset.get_nearest_examples("""vecs""" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
dset.drop_index("""vecs""" )
def _a (self ):
import faiss
A_ : Optional[int] = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
A_, A_ : int = dset.get_nearest_examples("""vecs""" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
def _a (self ):
import faiss
A_ : Optional[Any] = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__lowerCAmelCase ) as tmp_file:
dset.save_faiss_index("""vecs""" , tmp_file.name )
dset.load_faiss_index("""vecs2""" , tmp_file.name )
os.unlink(tmp_file.name )
A_, A_ : Dict = dset.get_nearest_examples("""vecs2""" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
def _a (self ):
A_ : Tuple = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" )
dset.drop_index("""vecs""" )
self.assertRaises(__lowerCAmelCase , partial(dset.get_nearest_examples , """vecs2""" , np.ones(5 , dtype=np.floataa ) ) )
def _a (self ):
from elasticsearch import Elasticsearch
A_ : Dict = self._create_dummy_dataset()
with patch("""elasticsearch.Elasticsearch.search""" ) as mocked_search, patch(
"""elasticsearch.client.IndicesClient.create""" ) as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""" ) as mocked_bulk:
A_ : Optional[Any] = {"""acknowledged""": True}
mocked_bulk.return_value([(True, None)] * 30 )
A_ : Optional[Any] = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 29}]}}
A_ : Union[str, Any] = Elasticsearch()
dset.add_elasticsearch_index("""filename""" , es_client=__lowerCAmelCase )
A_, A_ : Tuple = dset.get_nearest_examples("""filename""" , """my_name-train_29""" )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
@require_faiss
class FaissIndexTest(TestCase):
    def test_faiss_index(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)
        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)
def _a (self ):
import faiss
A_ : Tuple = FaissIndex(string_factory="""Flat""" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
A_ : Dict = FaissIndex(string_factory="""LSH""" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__lowerCAmelCase ):
A_ : str = FaissIndex(string_factory="""Flat""" , custom_index=faiss.IndexFlat(5 ) )
def _a (self ):
import faiss
A_ : List[Any] = faiss.IndexFlat(5 )
A_ : Union[str, Any] = FaissIndex(custom_index=__lowerCAmelCase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def _a (self ):
import faiss
A_ : Any = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__lowerCAmelCase ) as tmp_file:
index.save(tmp_file.name )
A_ : Optional[int] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
A_ : Dict = np.zeros(5 , dtype=np.floataa )
A_ : Optional[Any] = 1
A_, A_ : Optional[int] = index.search(__lowerCAmelCase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def a ( lowerCamelCase__ ):
'''simple docstring'''
import faiss
A_ : List[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
A_ : int = """index.faiss"""
A_ : Optional[int] = f'mock://{index_name}'
index.save(lowerCamelCase__ , storage_options=mockfs.storage_options )
A_ : Tuple = FaissIndex.load(lowerCamelCase__ , storage_options=mockfs.storage_options )
A_ : List[Any] = np.zeros(5 , dtype=np.floataa )
A_ : List[str] = 1
A_, A_ : Optional[Any] = index.search(lowerCamelCase__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
def _a (self ):
from elasticsearch import Elasticsearch
with patch("""elasticsearch.Elasticsearch.search""" ) as mocked_search, patch(
"""elasticsearch.client.IndicesClient.create""" ) as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""" ) as mocked_bulk:
A_ : List[str] = Elasticsearch()
A_ : int = {"""acknowledged""": True}
A_ : List[str] = ElasticSearchIndex(es_client=__lowerCAmelCase )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(["""foo""", """bar""", """foobar"""] )
# single query
A_ : Any = """foo"""
A_ : Optional[Any] = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 0}]}}
A_, A_ : int = index.search(__lowerCAmelCase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
A_ : str = """foo"""
A_ : Any = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 0}]}}
A_, A_ : List[str] = index.search(__lowerCAmelCase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
A_ : Any = ["""foo""", """bar""", """foobar"""]
A_ : Optional[Any] = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 1}]}}
A_, A_ : List[str] = index.search_batch(__lowerCAmelCase )
A_ : Dict = [scores[0] for scores in total_scores]
A_ : str = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCAmelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __lowerCAmelCase )
# batched queries with timeout
A_ : Union[str, Any] = ["""foo""", """bar""", """foobar"""]
A_ : Union[str, Any] = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 1}]}}
A_, A_ : Optional[Any] = index.search_batch(__lowerCAmelCase , request_timeout=30 )
A_ : Optional[int] = [scores[0] for scores in total_scores]
A_ : Optional[int] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCAmelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __lowerCAmelCase )
"""simple docstring"""
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    """Return True if the sink `t` is reachable from the source `s` in the
    residual graph; `parent` is filled in to record the path."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut(graph, source, sink):
    """Compute the edges of a minimum cut via the Ford-Fulkerson method."""
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
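
# Expected-result sketch (worked by hand on `test_graph`; illustrative, not a
# guaranteed output): the saturated edges that originally had positive
# capacity form the minimum cut, here [(1, 3), (4, 3), (4, 5)]. Note that
# `mincut` mutates its graph argument, so rerunning it on the same list
# would return [].
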
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False,
                 mask_pixel=False, *args, **kwargs):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
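
# Hypothetical usage sketch (not part of the original file; the checkpoint id
# is only an example): the processor merges image and audio features into one
# batch dict keyed by the sub-processors' input names.
# processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
# batch = processor(images=video_frames, audio=waveform, sampling_rate=44100)
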
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")

DUMMY_CONSTANT = "\n{0} = None\n"

DUMMY_CLASS = "\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n"

DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1
            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
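
# Usage sketch (per the header comment above, run from the repo root):
#   python utils/check_dummies.py                      # fail on stale dummy files
#   python utils/check_dummies.py --fix_and_overwrite  # regenerate them
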
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(self, args: SquadDataTrainingArguments, tokenizer: PreTrainedTokenizer,
                 limit_length: Optional[int] = None, mode: Union[str, Split] = Split.train,
                 is_language_sensitive: Optional[bool] = False, cache_dir: Optional[str] = None,
                 dataset_format: Optional[str] = "pt"):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride, max_query_length=args.max_query_length,
                    is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
def __len__( self : Dict ) -> Optional[Any]:
'''simple docstring'''
return len(self.features )
    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate each process's waiting time under non-preemptive SJF."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time with each process's burst time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes are not completed, a process whose arrival time has
    # passed and which has remaining execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_snake_case = [0] * no_of_processes
for i in range(_SCREAMING_SNAKE_CASE ):
_snake_case = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print('[TEST CASE 01]')
__lowerCAmelCase = 4
__lowerCAmelCase = [2, 5, 3, 7]
__lowerCAmelCase = [0, 0, 0, 0]
__lowerCAmelCase = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__lowerCAmelCase = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time')
for i, process_id in enumerate(list(range(1, 5))):
print(
f'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
f'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(f'''\nAverage waiting time = {mean(waiting_time):.5f}''')
print(f'''Average turnaround time = {mean(turn_around_time):.5f}''')
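
# ---------------------------------------------------------------------------
# Hand check of the test case above (added for illustration): with all
# arrivals at t=0 and bursts [2, 5, 3, 7], SJF runs P1(2), P3(3), P2(5),
# P4(7), so the waiting times are [0, 5, 2, 10] and the turnaround times
# (burst + waiting) are [2, 10, 5, 17].
#
#     assert calculate_waitingtime([0, 0, 0, 0], [2, 5, 3, 7], 4) == [0, 5, 2, 10]
# ---------------------------------------------------------------------------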
| 358
|
"""Measure a two-qubit circuit after flipping both qubits with X gates."""
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) gate to qubits 0 and 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
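
# ---------------------------------------------------------------------------
# Expected behaviour (added for illustration): both qubits are flipped from
# |0> to |1> before measurement, so every shot should collapse to '11':
#
#     counts = single_qubit_measure(2, 2)
#     assert counts == {"11": 1000}
# ---------------------------------------------------------------------------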
| 270
| 0
|
import argparse

import torch

from transformers import GPT2LMHeadModel, RobertaForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extract some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head #
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
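
# ---------------------------------------------------------------------------
# Example invocation (added for illustration; the script and checkpoint names
# are arbitrary). The flags below are exactly the ones defined by the
# argparser above:
#
#     python extract.py --model_type roberta --model_name roberta-large \
#         --dump_checkpoint serialization_dir/roberta_6l.pth --vocab_transform
#
# The saved state dict keeps 6 teacher layers (indices 0, 2, 4, 7, 9, 11),
# renumbered 0..5 so they can be loaded into a shallower student.
# ---------------------------------------------------------------------------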
| 297
|
"""Project Euler problem 74: count digit-factorial chains of a given length."""
from math import factorial

DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")
    # Converts number to a string to iterate on its digits and adds their factorials.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating
        # item, or the length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length

        # If the chain contains the exact amount of elements, increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
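
# ---------------------------------------------------------------------------
# Worked example (added for illustration): 169 sits in the well-known
# three-element loop 169 -> 363601 -> 1454 -> 169, since
# 1! + 6! + 9! = 1 + 720 + 362880 = 363601, and 145 maps to itself
# (1! + 4! + 5! = 145).
#
#     assert digit_factorial_sum(169) == 363601
#     assert digit_factorial_sum(145) == 145
# ---------------------------------------------------------------------------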
| 259
| 0
|
import qiskit


def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    """Entangle `qubits` qubits with a Hadamard gate and a chain of CNOTs."""
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, qubits)
    # Adding a H gate on qubit 0 (now q0 is in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(qubits)))
    # Now measuring any one qubit would collapse the superposition of the
    # other qubits so they have the same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {quantum_entanglement(3)}")
| 94
|
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 94
| 1
|
"""Greatest common divisor: recursive and iterative Euclidean algorithm."""


def greatest_common_divisor(a: int, b: int) -> int:
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    while y:  # --> when y=0 the loop terminates and x is returned as the final GCD.
        x, y = y, x % y
    return abs(x)


def main():
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
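
# ---------------------------------------------------------------------------
# Quick check (added for illustration): both implementations agree, e.g.
# gcd(24, 40) = 8 and gcd(121, 11) = 11.
#
#     assert greatest_common_divisor(24, 40) == 8
#     assert gcd_by_iterative(121, 11) == 11
# ---------------------------------------------------------------------------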
| 174
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
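
# ---------------------------------------------------------------------------
# Worked example of the resize logic above (added for illustration): with the
# default size={"shortest_edge": 384} the image is warped straight to
# 384x384, but for a smaller checkpoint size such as {"shortest_edge": 224}
# with crop_pct = 224/256 = 0.875, the shortest edge is first resized to
# int(224 / 0.875) = 256 and the result is then center-cropped to 224x224.
# ---------------------------------------------------------------------------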
| 174
| 1
|
from ..utils import DummyObject, requires_backends


class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 368
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 59
| 0
|
"""simple docstring"""
from __future__ import annotations
import queue
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = data
SCREAMING_SNAKE_CASE__ : Dict = None
SCREAMING_SNAKE_CASE__ : Dict = None
def lowercase_ ( ):
print("""\n********Press N to stop entering at any point of time********\n""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input("""Enter the value of the root node: """ ).strip().lower()
SCREAMING_SNAKE_CASE__ : queue.Queue = queue.Queue()
SCREAMING_SNAKE_CASE__ : int = TreeNode(int(_snake_case ) )
q.put(_snake_case )
while not q.empty():
SCREAMING_SNAKE_CASE__ : Dict = q.get()
SCREAMING_SNAKE_CASE__ : Any = f'''Enter the left node of {node_found.data}: '''
SCREAMING_SNAKE_CASE__ : List[Any] = input(_snake_case ).strip().lower() or """n"""
if check == "n":
return tree_node
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TreeNode(int(_snake_case ) )
SCREAMING_SNAKE_CASE__ : List[Any] = left_node
q.put(_snake_case )
SCREAMING_SNAKE_CASE__ : int = f'''Enter the right node of {node_found.data}: '''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input(_snake_case ).strip().lower() or """n"""
if check == "n":
return tree_node
SCREAMING_SNAKE_CASE__ : Any = TreeNode(int(_snake_case ) )
SCREAMING_SNAKE_CASE__ : str = right_node
q.put(_snake_case )
raise
def lowercase_ ( _snake_case ):
if not isinstance(_snake_case ,_snake_case ) or not node:
return
print(node.data ,end=""",""" )
pre_order(node.left )
pre_order(node.right )
def lowercase_ ( _snake_case ):
if not isinstance(_snake_case ,_snake_case ) or not node:
return
in_order(node.left )
print(node.data ,end=""",""" )
in_order(node.right )
def lowercase_ ( _snake_case ):
if not isinstance(_snake_case ,_snake_case ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data ,end=""",""" )
def lowercase_ ( _snake_case ):
if not isinstance(_snake_case ,_snake_case ) or not node:
return
SCREAMING_SNAKE_CASE__ : queue.Queue = queue.Queue()
q.put(_snake_case )
while not q.empty():
SCREAMING_SNAKE_CASE__ : int = q.get()
print(node_dequeued.data ,end=""",""" )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def lowercase_ ( _snake_case ):
if not isinstance(_snake_case ,_snake_case ) or not node:
return
SCREAMING_SNAKE_CASE__ : queue.Queue = queue.Queue()
q.put(_snake_case )
while not q.empty():
SCREAMING_SNAKE_CASE__ : int = []
while not q.empty():
SCREAMING_SNAKE_CASE__ : List[Any] = q.get()
print(node_dequeued.data ,end=""",""" )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(_snake_case )
def lowercase_ ( _snake_case ):
if not isinstance(_snake_case ,_snake_case ) or not node:
return
SCREAMING_SNAKE_CASE__ : list[TreeNode] = []
SCREAMING_SNAKE_CASE__ : str = node
while n or stack:
while n: # start from root node, find its left child
print(n.data ,end=""",""" )
stack.append(_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[Any] = n.left
# end of while means current node doesn't have left child
SCREAMING_SNAKE_CASE__ : Tuple = stack.pop()
# start to traverse its right child
SCREAMING_SNAKE_CASE__ : Dict = n.right
def lowercase_ ( _snake_case ):
if not isinstance(_snake_case ,_snake_case ) or not node:
return
SCREAMING_SNAKE_CASE__ : list[TreeNode] = []
SCREAMING_SNAKE_CASE__ : Dict = node
while n or stack:
while n:
stack.append(_snake_case )
SCREAMING_SNAKE_CASE__ : str = n.left
SCREAMING_SNAKE_CASE__ : int = stack.pop()
print(n.data ,end=""",""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = n.right
def lowercase_ ( _snake_case ):
if not isinstance(_snake_case ,_snake_case ) or not node:
return
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = [], []
SCREAMING_SNAKE_CASE__ : Union[str, Any] = node
stacka.append(_snake_case )
while stacka: # to find the reversed order of post order, store it in stack2
SCREAMING_SNAKE_CASE__ : Optional[Any] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(_snake_case )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data ,end=""",""" )
def lowercase_ ( _snake_case = "" ,_snake_case=50 ,_snake_case="*" ):
if not s:
return "\n" + width * char
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = divmod(width - len(_snake_case ) - 2 ,2 )
return f'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
UpperCAmelCase__ : TreeNode = build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 5_0 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
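
# ---------------------------------------------------------------------------
# Example (added for illustration): for the tree
#         1
#        / \
#       2   3
# pre_order prints 1,2,3,  in_order prints 2,1,3,  post_order prints 2,3,1,
# and level_order prints 1,2,3,. A tree can also be built non-interactively:
#
#     root = TreeNode(1)
#     root.left, root.right = TreeNode(2), TreeNode(3)
#     in_order(root)  # 2,1,3,
# ---------------------------------------------------------------------------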
| 25
|
"""simple docstring"""
UpperCAmelCase__ : Any = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
UpperCAmelCase__ : Any = [{'type': 'code', 'content': INSTALL_CONTENT}]
UpperCAmelCase__ : Optional[int] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 25
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return 32
@property
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
return 32
@property
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
return self.time_input_dim
@property
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return 100
@property
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
snake_case_ = MultilingualCLIP(a__ )
snake_case_ = text_encoder.eval()
return text_encoder
@property
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
        snake_case_ = UNet2DConditionModel(**a__ )
return model
@property
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ = self.dummy_text_encoder
snake_case_ = self.dummy_tokenizer
snake_case_ = self.dummy_unet
snake_case_ = self.dummy_movq
snake_case_ = {
"num_train_timesteps": 1_000,
"beta_schedule": "linear",
"beta_start": 0.0_0_0_8_5,
"beta_end": 0.0_1_2,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
snake_case_ = DDIMScheduler(**a__ )
snake_case_ = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def lowerCAmelCase__ ( self , a__ , a__=0 ) -> Dict:
'''simple docstring'''
snake_case_ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(a__ ) ).to(a__ )
snake_case_ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(a__ )
# create init_image
snake_case_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(a__ ) ).to(a__ )
snake_case_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        snake_case_ = Image.fromarray(np.uint8(a__ ) ).convert("RGB" ).resize((256, 256) )
if str(a__ ).startswith("mps" ):
snake_case_ = torch.manual_seed(a__ )
else:
snake_case_ = torch.Generator(device=a__ ).manual_seed(a__ )
snake_case_ = {
"prompt": "horse",
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ = "cpu"
snake_case_ = self.get_dummy_components()
snake_case_ = self.pipeline_class(**a__ )
snake_case_ = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
snake_case_ = pipe(**self.get_dummy_inputs(a__ ) )
snake_case_ = output.images
snake_case_ = pipe(
**self.get_dummy_inputs(a__ ) , return_dict=a__ , )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_img2img_frog.npy" )
snake_case_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
snake_case_ = "A red cartoon frog, 4k"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
snake_case_ = pipeline.to(a__ )
pipeline.set_progress_bar_config(disable=a__ )
snake_case_ = torch.Generator(device="cpu" ).manual_seed(0 )
snake_case_ , snake_case_ = pipe_prior(
a__ , generator=a__ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
snake_case_ = pipeline(
a__ , image=a__ , image_embeds=a__ , negative_image_embeds=a__ , generator=a__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )
snake_case_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a__ , a__ )
| 92
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attns=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attns = output_a3_attns
        self.initializer_range = initializer_range
| 1
|
"""simple docstring"""
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def UpperCAmelCase__ (lowerCAmelCase_ = 100 ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 2
for i in range(2 , max_n + 1 ):
__SCREAMING_SNAKE_CASE = pre_numerator
__SCREAMING_SNAKE_CASE = 2 * i // 3 if i % 3 == 0 else 1
__SCREAMING_SNAKE_CASE = cur_numerator
__SCREAMING_SNAKE_CASE = e_cont * pre_numerator + temp
return sum_digits(lowerCAmelCase_ )
if __name__ == "__main__":
print(F"{solution() = }")
| 54
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 0.9 , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 / 255 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> None:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = size if size is not None else {'''shortest_edge''': 224}
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='''crop_size''' )
__UpperCamelCase = do_resize
__UpperCamelCase = size
__UpperCamelCase = crop_pct
__UpperCamelCase = resample
__UpperCamelCase = do_center_crop
__UpperCamelCase = crop_size
__UpperCamelCase = do_rescale
__UpperCamelCase = rescale_factor
__UpperCamelCase = do_normalize
__UpperCamelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__UpperCamelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> np.ndarray:
'''simple docstring'''
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(F"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
if crop_pct is not None:
if "shortest_edge" in size:
__UpperCamelCase = int(size['''shortest_edge'''] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
__UpperCamelCase = int(size['''height'''] / crop_pct )
else:
__UpperCamelCase = (int(size['''height'''] / crop_pct ), int(size['''width'''] / crop_pct ))
else:
raise ValueError('''Invalid size for resize: {}'''.format(SCREAMING_SNAKE_CASE_ ) )
__UpperCamelCase = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
else:
if "shortest_edge" in size:
__UpperCamelCase = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=size['''shortest_edge'''] , default_to_square=SCREAMING_SNAKE_CASE_ )
elif "height" in size and "width" in size:
__UpperCamelCase = (size['''height'''], size['''width'''])
else:
raise ValueError('''Invalid size for resize: {}'''.format(SCREAMING_SNAKE_CASE_ ) )
return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> np.ndarray:
'''simple docstring'''
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(F"size must contain 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(SCREAMING_SNAKE_CASE_ , size=(size['''height'''], size['''width''']) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> str:
'''simple docstring'''
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> np.ndarray:
'''simple docstring'''
return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , )-> PIL.Image.Image:
'''simple docstring'''
__UpperCamelCase = do_resize if do_resize is not None else self.do_resize
__UpperCamelCase = crop_pct if crop_pct is not None else self.crop_pct
__UpperCamelCase = resample if resample is not None else self.resample
__UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
__UpperCamelCase = image_mean if image_mean is not None else self.image_mean
__UpperCamelCase = image_std if image_std is not None else self.image_std
__UpperCamelCase = size if size is not None else self.size
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = crop_size if crop_size is not None else self.crop_size
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='''crop_size''' )
__UpperCamelCase = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_pct is None:
raise ValueError('''Crop_pct must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__UpperCamelCase = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
__UpperCamelCase = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , crop_pct=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_center_crop:
__UpperCamelCase = [self.center_crop(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
__UpperCamelCase = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_normalize:
__UpperCamelCase = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images]
__UpperCamelCase = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
__UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
| 328
| 0
|
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase_ : Union[str, Any] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Tuple = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ):
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase_ : int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : str = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self : int ):
for model_name in ["bert-base-cased", "bert-large-uncased"]:
lowerCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[str] = FlaxBertModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Union[str, Any] = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
return model(**SCREAMING_SNAKE_CASE_ )
eval(**SCREAMING_SNAKE_CASE_ ).block_until_ready()
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
for model_name in ["roberta-base", "roberta-large"]:
lowerCAmelCase_ : Any = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[str] = FlaxRobertaModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[int] = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**SCREAMING_SNAKE_CASE_ : List[Any] ):
return model(**SCREAMING_SNAKE_CASE_ )
eval(**SCREAMING_SNAKE_CASE_ ).block_until_ready()
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE_ , 'bert-base is not a local folder and is not a valid model identifier' ):
lowerCAmelCase_ : Optional[int] = FlaxAutoModel.from_pretrained('bert-base' )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE_ , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
lowerCAmelCase_ : Any = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE_ , revision='aaaaaa' )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE_ , 'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack' , ):
lowerCAmelCase_ : str = FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , 'Use `from_pt=True` to load this model' ):
lowerCAmelCase_ : Tuple = FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
| 289
|
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    """Abstract base class for constraints applied during generation."""

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")
        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class PhrasalConstraint(Constraint):
    """Forces the generated sequence to contain the phrase `token_ids`."""

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()
        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
class DisjunctiveTrie:
    """A trie over several candidate token sequences."""

    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )
        self.trie = root

    def next_tokens(self, current_seq):
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
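
# ---------------------------------------------------------------------------
# Illustration (added; not part of the original file): for
# nested_token_ids = [[1, 2, 3], [1, 2, 4]], the trie is
# {1: {2: {3: {}, 4: {}}}}, so next_tokens([1, 2]) returns [3, 4] and
# reached_leaf([1, 2, 3]) is True.
# ---------------------------------------------------------------------------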
class DisjunctiveConstraint(Constraint):
    """Fulfilled when any one of several candidate phrases is generated."""

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()
        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )
        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class ConstraintListState:
    """A helper for beam scorers to track their progress through a list of constraints."""
    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()
    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]
    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add
    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)
        if len(token_list) == 0:
            return None
        else:
            return token_list
    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)
                # the entire list of constraints is fulfilled
                if self.completed:
                    break
    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state.
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to the completed list and set
                #    inprogress to None. If there are no pending constraints either, then this full list of
                #    constraints is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of
            # our list of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must
                            # be complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped
    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we never actually modify self.constraints
        # throughout this process, so it's still at its initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
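# How these classes are typically consumed, as a hedged sketch: `model.generate`
# accepts a `constraints=[...]` argument for constrained beam search. The "t5-small"
# checkpoint is only an example, and the exact output depends on the model.
def _demo_constrained_generation():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("t5-small")
    model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
    # Force the phrase "maschinelles Lernen" to appear somewhere in the output.
    constraint = PhrasalConstraint(tokenizer("maschinelles Lernen", add_special_tokens=False).input_ids)
    inputs = tokenizer("translate English to German: I like machine learning.", return_tensors="pt")
    outputs = model.generate(**inputs, constraints=[constraint], num_beams=4)
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))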
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
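# The shim pattern above in isolation, with hypothetical class names: the old name
# stays importable but warns once and otherwise behaves exactly like its replacement.
class _NewProcessor:
    def __init__(self, size=224):
        self.size = size
class _OldFeatureExtractor(_NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "_OldFeatureExtractor is deprecated; use _NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)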
| 45
|
from ..utils import DummyObject, requires_backends
class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
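# What the pattern above buys you, as a simplified self-contained sketch (a hypothetical
# re-implementation, not the library's actual `DummyObject`/`requires_backends`):
# instantiating or touching any classmethod on the placeholder raises a helpful error.
def _demo_dummy_object():
    class _DummyObject(type):
        def __getattr__(cls, key):
            _requires_backends(cls, cls._backends)
    def _requires_backends(obj, backends):
        raise ImportError(f"{obj} requires the following backends: {', '.join(backends)}")
    class _Pipeline(metaclass=_DummyObject):
        _backends = ["torch", "transformers", "onnx"]
        def __init__(self, *args, **kwargs):
            _requires_backends(self, self._backends)
    try:
        _Pipeline()
    except ImportError as e:
        print(e)  # lists the missing backends instead of failing with an obscure AttributeError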
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)
DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    """Configuration class for DPT models."""
    model_type = "dpt"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid
        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages
            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
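# A minimal usage sketch: the config round-trips through `to_dict`, and the hybrid
# switch controls whether a BiT backbone config is attached.
def _demo_dpt_config():
    config = DPTConfig()                # plain ViT backbone
    assert config.model_type == "dpt"
    assert config.to_dict()["hidden_size"] == 768
    hybrid = DPTConfig(is_hybrid=True)  # attaches the default BiT backbone
    assert hybrid.to_dict()["backbone_config"]["layer_type"] == "bottleneck"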
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length from letters, digits and punctuation."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))
def alternative_password_generator(chars_incl: str, i: int) -> str:
    """Generate a password of length `i` that is guaranteed to contain `chars_incl`."""
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)
# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))
def random_number(chars_incl, i):
    pass  # Put your code here...
def random_letters(chars_incl, i):
    pass  # Put your code here...
def random_characters(chars_incl, i):
    pass  # Put your code here...
def is_strong_password(password: str, min_length: int = 8) -> bool:
    """A strong password contains uppercase, lowercase, numbers, and special characters,
    and is at least `min_length` characters long."""
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, You better save it.]")
if __name__ == "__main__":
    main()
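# Quick illustrative checks of the helpers above (the passwords themselves are random,
# so only structural properties are asserted):
def _demo_passwords():
    pw = password_generator(12)
    assert len(pw) == 12
    assert is_strong_password("Hello4World!")  # upper + lower + digit + punctuation
    assert not is_strong_password("weak")      # too short
    assert len(alternative_password_generator("@1A", 10)) == 10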
from ..utils import DummyObject, requires_backends
class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
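# A simplified sketch of what `_LazyModule` does under the hood (a hypothetical
# re-implementation, not the library's actual class): attribute access triggers
# the import of the submodule that defines the requested name.
def _demo_lazy_module():
    import importlib
    import types
    class _LazyModuleSketch(types.ModuleType):
        def __init__(self, name, import_structure):
            super().__init__(name)
            # map each exported name to the submodule that defines it
            self._name_to_submodule = {
                attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
            }
        def __getattr__(self, attr):
            submodule = self._name_to_submodule[attr]
            module = importlib.import_module(f".{submodule}", self.__name__)
            return getattr(module, attr)
    return _LazyModuleSketch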
"""simple docstring"""
def selection_sort(collection: list) -> list:
    """Sort a collection in place by repeatedly selecting the minimum of the unsorted tail."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
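# Worked examples: selection sort does O(n^2) comparisons but at most n - 1 swaps.
def _demo_selection_sort():
    assert selection_sort([64, 25, 12, 22, 11]) == [11, 12, 22, 25, 64]
    assert selection_sort([]) == []
    assert selection_sort([1]) == [1]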
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
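# What the structure above means in practice, as a hedged sketch (assumes `transformers`
# is installed): importing the package is cheap, and the heavy submodule only loads on
# first attribute access.
def _demo_lazy_llama_import():
    import transformers.models.llama as llama  # fast: no torch-heavy modules imported yet
    config_cls = llama.LlamaConfig             # first access imports configuration_llama
    return config_cls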
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float-valued nested list of the given 2-D shape."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]
        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
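# A minimal sketch of the pipeline the tests above exercise: raw mono audio in,
# fixed-size log-mel features out (80 mel bins x 3000 frames = 30 s at 16 kHz).
def _demo_whisper_features():
    feature_extractor = WhisperFeatureExtractor()
    audio = np.random.randn(16_000).astype(np.float32)  # 1 second of noise at 16 kHz
    features = feature_extractor(audio, sampling_rate=16_000, return_tensors="np").input_features
    assert features.shape == (1, 80, 3000)  # shorter inputs are padded out to 30 s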
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert `value` between any two of the energy units above."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
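# Worked examples of the formula `value * factor[from_type] / factor[to_type]`:
def _demo_energy_conversion():
    assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0
    assert energy_conversion("joule", "kilojoule", 5_000) == 5.0
    assert energy_conversion("calorie_nutr", "joule", 1) == 4_186.8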
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))
        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))
    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))
        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))
    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))
        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )
                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)
                self.assertEqual(input_dict, prepared_input_dict)
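# A hedged usage sketch of the three parallel id streams checked above; the checkpoint
# name is the one used in the RoCBert documentation and may change.
def _demo_rocbert_tokenizer():
    from transformers import RoCBertTokenizer
    tok = RoCBertTokenizer.from_pretrained("weiweishi/roc-bert-base-zh")
    tokens = tok.tokenize("你好")
    return (
        tok.convert_tokens_to_ids(tokens),
        tok.convert_tokens_to_shape_ids(tokens),          # glyph-shape ids
        tok.convert_tokens_to_pronunciation_ids(tokens),  # pinyin ids
    )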
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
"""simple docstring"""
def _lowercase ( __lowerCAmelCase ) -> Dict:
SCREAMING_SNAKE_CASE__ : Dict = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
SCREAMING_SNAKE_CASE__ : int = {
"""^""": 3,
"""*""": 2,
"""/""": 2,
"""%""": 2,
"""+""": 1,
"""-""": 1,
} # Priority of each operator
SCREAMING_SNAKE_CASE__ : List[Any] = len(__lowerCAmelCase ) if (len(__lowerCAmelCase ) > 7) else 7
# Print table header for output
print(
"""Symbol""".center(8 ) , """Stack""".center(__lowerCAmelCase ) , """Postfix""".center(__lowerCAmelCase ) , sep=""" | """ , )
print("""-""" * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(__lowerCAmelCase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(__lowerCAmelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(__lowerCAmelCase ) == 0:
stack.append(__lowerCAmelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(__lowerCAmelCase ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(__lowerCAmelCase ) # push x to stack
print(
x.center(8 ) , ("""""".join(__lowerCAmelCase )).ljust(__lowerCAmelCase ) , ("""""".join(__lowerCAmelCase )).ljust(__lowerCAmelCase ) , sep=""" | """ , ) # Output in tabular format
while len(__lowerCAmelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
""" """.center(8 ) , ("""""".join(__lowerCAmelCase )).ljust(__lowerCAmelCase ) , ("""""".join(__lowerCAmelCase )).ljust(__lowerCAmelCase ) , sep=""" | """ , ) # Output in tabular format
return "".join(__lowerCAmelCase ) # return Postfix as str
def _lowercase ( __lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__ : List[str] = list(infix[::-1] ) # reverse the infix equation
for i in range(len(__lowerCAmelCase ) ):
if infix[i] == "(":
SCREAMING_SNAKE_CASE__ : Optional[int] = """)""" # change "(" to ")"
elif infix[i] == ")":
SCREAMING_SNAKE_CASE__ : Optional[Any] = """(""" # change ")" to "("
return (infix_2_postfix("""""".join(__lowerCAmelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
a :Optional[int] = input("\nEnter an Infix Equation = ") # Input an Infix equation
a :Dict = "".join(Infix.split()) # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
'''A small multilayer perceptron with two hidden layers, trained by backpropagation.'''

import numpy


class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array, output_array):
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self):
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self):
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output, iterations, give_loss):
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr):
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value):
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value):
    return (value) * (1 - (value))


def example():
    # Input values (each row is one training sample with three binary features).
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
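
# Quick sanity checks for the activation helpers (hypothetical doctest):
#
#     >>> sigmoid(0)
#     0.5
#     >>> sigmoid_derivative(0.5)   # the derivative is expressed in terms of sigmoid's output
#     0.25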
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input, drop_prob=0.0, training=False):
    """Drop paths (Stochastic Depth) per sample, applied in the main path of residual blocks."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
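
# Behavioral note (illustrative snippet, not part of the original module): outside
# of training, or with drop_prob == 0.0, drop_path is the identity function:
#
#     x = torch.ones(2, 3)
#     assert torch.equal(drop_path(x, drop_prob=0.5, training=False), x)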
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob=None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states):
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self):
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """
    Construct Patch Embeddings.
    """

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """
    Group Normalization with 1 group. Input: tensor in shape [B, C, H, W].
    """

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)


class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # Subtracting the input realises the "pooling minus identity" token mixer.
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value


POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])
        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values=None,
        labels=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
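
# Usage sketch (hypothetical; a randomly initialized model, where real weights
# would normally come from `PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")`):
#
#     config = PoolFormerConfig()
#     model = PoolFormerForImageClassification(config)
#     pixel_values = torch.randn(1, 3, 224, 224)
#     logits = model(pixel_values).logits   # shape: (1, config.num_labels)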
"""simple docstring"""
from math import pi, sqrt
def lowercase__(A ) ->float:
"""simple docstring"""
if num <= 0:
raise ValueError("math domain error" )
if num > 171.5:
raise OverflowError("math range error" )
elif num - int(A ) not in (0, 0.5):
raise NotImplementedError("num must be an integer or a half-integer" )
elif num == 0.5:
return sqrt(A )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def lowercase__() ->None:
"""simple docstring"""
assert gamma(0.5 ) == sqrt(A )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
a : Tuple = 1.0
while num:
a : List[Any] = float(input("""Gamma of: """))
print(F"""gamma({num}) = {gamma(num)}""")
print("""\nEnter 0 to exit...""")
"""simple docstring"""
import os
from pathlib import Path
def lowercase__() ->List[Any]:
"""simple docstring"""
from torch.utils.cpp_extension import load
lowercase__ : Any= Path(A ).resolve().parent.parent.parent / "kernels" / "deformable_detr"
lowercase__ : Any= [
root / filename
for filename in [
"vision.cpp",
os.path.join("cpu" , "ms_deform_attn_cpu.cpp" ),
os.path.join("cuda" , "ms_deform_attn_cuda.cu" ),
]
]
load(
"MultiScaleDeformableAttention" , A , with_cuda=A , extra_include_paths=[str(A )] , extra_cflags=["-DWITH_CUDA=1"] , extra_cuda_cflags=[
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
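
# Usage sketch (assumptions: a CUDA toolchain is installed and the kernel sources
# ship next to this file; torch's cpp_extension loader caches the compiled
# extension, so repeat calls are cheap):
#
#     MSDA = load_cuda_kernels()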
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Rabin-Miller primality test with 5 random witnesses."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num(num: int) -> bool:
    """Trial division against a table of small primes, falling back to Rabin-Miller."""
    if num < 2:
        return False

    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
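
# Quick illustration (hypothetical doctest): the candidate is drawn from
# [2**(keysize - 1), 2**keysize), so the returned prime always has exactly
# `keysize` bits.
#
#     >>> generate_large_prime(16).bit_length()
#     16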
if __name__ == "__main__":
__A : Optional[Any] = generate_large_prime()
print(("Prime number:", num))
print(("is_prime_low_num:", is_prime_low_num(num)))
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
_lowerCAmelCase = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65_024,
        hidden_size=4_544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        eos_token_id=11,
        bos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
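
# Minimal usage sketch (illustrative values matching the Falcon-7B defaults):
#
#     config = FalconConfig(hidden_size=4544, num_attention_heads=71)
#     assert config.head_dim == 64   # 4544 // 71
#     assert config.rotary           # rotary embeddings are used unless alibi=True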
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
__lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
__lowerCamelCase : Any = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
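
# Programmatic equivalent of the CLI above (hypothetical arguments; any key of
# TOKENIZER_CLASSES, e.g. "BertTokenizer", is a valid tokenizer_name):
#
#     convert_slow_checkpoint_to_fast(
#         tokenizer_name="BertTokenizer",
#         checkpoint_name="bert-base-uncased",
#         dump_path="/tmp/fast_tokenizers",
#         force_download=False,
#     )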
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
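
# Usage sketch (hypothetical, relying on the defaults above): the first
# down-sampling op's ratio is derived as hidden_sizes[0] // key_dim[0] == 128 // 16 == 8.
#
#     config = LevitConfig()
#     assert config.down_ops[0] == ["Subsample", 16, 8, 4, 2, 2]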
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
  title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
  author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
  booktitle = {NeurIPS},
  year = {2021}
}

"""

_DESCRIPTION = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.

MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.

For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).

This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""

_KWARGS_DESCRIPTION = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
    predictions: list of generated text to score. Each predictions
        should be a string with tokens separated by spaces.
    references: list of reference for each prediction. Each
        reference should be a string with tokens separated by spaces.
Optional Args:
    num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
    pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
    kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
    kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
    kmeans_max_iter: maximum number of k-means iterations. Default 500
    featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
    device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
    max_text_length: maximum number of tokens to consider. Default 1024
    divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
    mauve_scaling_factor: "c" from the paper. Default 5.
    verbose: If True (default), print running time updates
    seed: random seed to initialize k-means cluster assignments.
Returns:
    mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
    frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
    divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
    p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
    q_hist: same as above, but with q_text.
Examples:

    >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
    >>> import datasets
    >>> mauve = datasets.load_metric('mauve')
    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
    >>> print(out.mauve) # doctest: +SKIP
    1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def a ( self : List[Any] , _lowercase : Union[str, Any] , _lowercase : str , _lowercase : int , _lowercase : str , _lowercase : int , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Tuple , _lowercase : Dict , ):
__UpperCAmelCase = FlaubertWithLMHeadModel(_lowercase )
model.to(_lowercase )
model.eval()
__UpperCAmelCase = model(_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a ( self : Dict , _lowercase : List[Any] , _lowercase : str , _lowercase : int , _lowercase : Optional[Any] , _lowercase : Dict , _lowercase : Any , _lowercase : Union[str, Any] , _lowercase : str , _lowercase : int , ):
__UpperCAmelCase = FlaubertForQuestionAnsweringSimple(_lowercase )
model.to(_lowercase )
model.eval()
__UpperCAmelCase = model(_lowercase )
__UpperCAmelCase = model(_lowercase , start_positions=_lowercase , end_positions=_lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a ( self : Optional[int] , _lowercase : List[Any] , _lowercase : Union[str, Any] , _lowercase : List[str] , _lowercase : int , _lowercase : Dict , _lowercase : Tuple , _lowercase : Any , _lowercase : List[str] , _lowercase : List[Any] , ):
__UpperCAmelCase = FlaubertForQuestionAnswering(_lowercase )
model.to(_lowercase )
model.eval()
__UpperCAmelCase = model(_lowercase )
__UpperCAmelCase = model(
_lowercase , start_positions=_lowercase , end_positions=_lowercase , cls_index=_lowercase , is_impossible=_lowercase , p_mask=_lowercase , )
__UpperCAmelCase = model(
_lowercase , start_positions=_lowercase , end_positions=_lowercase , cls_index=_lowercase , is_impossible=_lowercase , )
((__UpperCAmelCase) , ) = result_with_labels.to_tuple()
__UpperCAmelCase = model(_lowercase , start_positions=_lowercase , end_positions=_lowercase )
((__UpperCAmelCase) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
"""simple docstring"""
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    # closed-form sum of an arithmetic series: n/2 * (2a + (n - 1)d)
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total


def main():
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
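
# Added illustration (not part of the original script): the closed-form result
# should match a brute-force sum of the terms. The helper name below is ours.
def _brute_force_series_sum(first_term: int, common_diff: int, num_of_terms: int) -> float:
    return float(sum(first_term + i * common_diff for i in range(num_of_terms)))


assert sum_of_series(1, 1, 10) == _brute_force_series_sum(1, 1, 10) == 55.0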
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None,
    head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
        hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32,
        eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_utterances, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
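
# Illustrative sketch (added, not part of the test file): the `shift_tokens_right`
# helper exercised above prepends the decoder start token and drops the last
# position. A minimal NumPy stand-in consistent with the assertions in
# test_shift_tokens_right -- the name and the -100 handling are assumptions:
def _shift_tokens_right_sketch(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    # map any masked (-100) entries back to the pad token
    return np.where(shifted == -100, pad_token_id, shifted)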
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config
def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensor = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensor, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensor).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''naver-clova-ix/donut-base-finetuned-docvqa''',
required=False,
type=str,
help='''Name of the original model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
required=False,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub.''',
)
    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
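
# Illustrative sketch (added, not part of the conversion script): `convert_state_dict`
# slices a fused qkv projection of shape (3*dim, dim) into query/key/value blocks
# row-wise. On a dummy tensor:
_dim = 4
_fused_qkv = torch.arange(3 * _dim * _dim, dtype=torch.float32).reshape(3 * _dim, _dim)
_q, _k, _v = _fused_qkv[:_dim, :], _fused_qkv[_dim : _dim * 2, :], _fused_qkv[-_dim:, :]
assert _q.shape == _k.shape == _v.shape == (_dim, _dim)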
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'''
] , )
    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
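
# Illustrative sketch (an addition, not part of the metric): for binary labels,
# MCC can be computed directly from the confusion matrix as
# (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)), which is what
# sklearn's matthews_corrcoef generalizes to the multiclass case.
def _manual_binary_mcc(references, predictions):
    tp = sum(1 for r, p in zip(references, predictions) if r == 1 and p == 1)
    tn = sum(1 for r, p in zip(references, predictions) if r == 0 and p == 0)
    fp = sum(1 for r, p in zip(references, predictions) if r == 0 and p == 1)
    fn = sum(1 for r, p in zip(references, predictions) if r == 1 and p == 0)
    denom = ((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) ** 0.5
    return (tp * tn - fp * fn) / denom if denom else 0.0


# e.g. _manual_binary_mcc([1, 1, 0, 0], [1, 0, 0, 1]) == 0.0, matching sklearn
assert _manual_binary_mcc([1, 1, 0, 0], [1, 0, 0, 1]) == 0.0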
import re
def dna_complement(dna: str) -> str:
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
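
# Example usage (added): the complement maps A<->T and C<->G; reading the result
# backwards gives the reverse complement. `dna_complement` is the descriptive
# name given above to the original, obfuscated helper.
assert dna_complement("ATCG") == "TAGC"
assert dna_complement("ATCG")[::-1] == "CGAT"  # reverse complement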
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
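
# Usage sketch (added for illustration): wiring the two classes together; the
# task keyword below follows the generic OnnxConfig constructor and is an
# assumption, not something exercised in this file.
if __name__ == "__main__":
    config = MobileNetV2Config(depth_multiplier=1.0)
    onnx_config = MobileNetV2OnnxConfig(config, task="image-classification")
    print(onnx_config.inputs)   # OrderedDict([('pixel_values', {0: 'batch'})])
    print(onnx_config.outputs)  # OrderedDict([('logits', {0: 'batch'})])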
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = mismatch_index - match_index  # shifting index lgtm [py/multiple-definition]
        return positions
text = "ABAABA"
pattern = "AB"

bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print("""No match found""")
else:
print("""Pattern found in following positions: """)
print(positions)
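
# Cross-check (added for illustration): the bad character heuristic reports every
# alignment where the pattern fully matches, so it should agree with a naive scan.
def _naive_search(t: str, p: str) -> list:
    return [i for i in range(len(t) - len(p) + 1) if t[i : i + len(p)] == p]


assert sorted(positions) == _naive_search(text, pattern)  # [0, 3] for "ABAABA"/"AB"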
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'moussaKam/mbarthez': 1024,
'moussaKam/barthez': 1024,
'moussaKam/barthez-orangesum-title': 1024,
}
SPIECE_UNDERLINE = '▁'
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
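
# Illustrative sketch (added): the pair layout produced above is the
# CamemBERT/BARThez-style `<s> A </s></s> B </s>` with all-zero token type ids.
# Reproducing the arithmetic with toy ids (0 standing in for <s>, 2 for </s>):
_cls, _sep, _a, _b = [0], [2], [10, 11], [20]
assert _cls + _a + _sep + _sep + _b + _sep == [0, 10, 11, 2, 2, 20, 2]
assert len(_cls + _a + _sep + _sep + _b + _sep) * [0] == [0] * 7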
'''simple docstring'''
def binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
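
# Added sanity check (not in the original): the string construction agrees with
# Python's built-in bitwise AND once the "0b"-prefixed result is parsed in base 2.
for _x, _y in [(0, 0), (5, 3), (25, 32), (37, 50)]:
    assert int(binary_and(_x, _y), 2) == (_x & _y)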
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
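
# Illustrative sketch (added): `_LazyModule` defers the heavy torch imports above
# until an attribute is first accessed. A simplified stand-in using PEP 562
# module-level __getattr__ -- not the actual transformers implementation:
#
#     import importlib
#
#     def __getattr__(name):
#         for module_name, exports in _import_structure.items():
#             if name in exports:
#                 module = importlib.import_module("." + module_name, __name__)
#                 return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")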
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]
def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass
    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass
@slow
@require_torch
    def test_mask_generation(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.021},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9967},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.993},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9909},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9879},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, """scores""": 0.9834},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9716},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9612},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9599},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9552},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9532},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9516},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9499},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9483},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9464},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.943},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.943},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9408},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9335},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9326},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9262},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8999},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8986},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8984},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8873},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8871}
] , )
# fmt: on
@require_torch
@slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0210},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053},
] , )
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # Fast tests are provided by the mixin; nothing inpaint-specific to add here.
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inpainting(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
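    # Note (added for clarity): ONNX pipelines are seeded with a NumPy `RandomState`
    # rather than a `torch.Generator`, since the sampling loop runs through onnxruntime.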
    def test_inpainting_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx")
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
a__ : Any = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy",
        ).images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        rounded_slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in rounded_slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")
        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()
        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg")
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
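    # The callback fires once per denoising step (`callback_steps=1`); attaching
    # `has_been_called` to the function object lets the outer test assert it ran.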
    def test_stable_diffusion_pix2pix_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
"""simple docstring"""
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
_CITATION = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , reference_urls=[] , )
    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("""Accelerate CLI tool""", usage="""accelerate <command> [<args>]""", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="""accelerate command helpers""")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, """func"""):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
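# Example invocations (added for illustration, assuming the `accelerate` console
# entry point dispatches to `main` above):
#   $ accelerate config
#   $ accelerate launch train.py --num_processes 2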
if __name__ == "__main__":
main()
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if inductance < 0:
raise ValueError("""Inductance cannot be negative""" )
if frequency < 0:
raise ValueError("""Frequency cannot be negative""" )
if reactance < 0:
raise ValueError("""Inductive reactance cannot be negative""" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
if num < 0:
raise ValueError("""Number should not be negative.""" )
return 1 if num in (0, 1) else num * factorial(num - 1 )
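# Example (added for illustration): factorial(5) == 120. Because of `@lru_cache`,
# each intermediate value in the recursion is computed once and memoized across calls.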
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
lowercase__ :str = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
lowercase__ :str = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
lowercase__ :Optional[Any] = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__) < version.parse('''1.4.12'''):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''')
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence'''),
'''references''': datasets.Sequence(datasets.Value('''string''' ,id='''sequence''') ,id='''references'''),
}) ,codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] ,reference_urls=[
'''https://github.com/m-popovic/chrF''',
] ,)
    def _compute(
        self,
        predictions,
        references,
        char_order=CHRF.CHAR_ORDER,
        word_order=CHRF.WORD_ORDER,
        beta=CHRF.BETA,
        lowercase=False,
        whitespace=False,
        eps_smoothing=False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError('''Sacrebleu requires the same number of references for each prediction''')
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
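# Sketch (added for clarity): sacrebleu groups references by position, so an input of
#   references = [["ref a1", "ref a2"], ["ref b1", "ref b2"]]
# is transposed above into [["ref a1", "ref b1"], ["ref a2", "ref b2"]] before being
# passed to `corpus_score`.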
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowercase__ :Optional[Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
lowercase__ :int = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowercase__ :List[Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
lowercase__ :List[str] = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
# used during training (despite we don't have training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check whether any name in `attributes` is used in one of the modeling source strings."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f'config.{attribute}' in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
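# Example (added for illustration) of a usage the multi-line regex above catches:
#   getattr(
#       self.config, "hidden_size"
#   )
# which the simple substring checks would miss.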
def check_config_attributes_being_used(config_class):
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case : Dict =logging.get_logger(__name__)
__snake_case : Optional[int] ={
'facebook/xlm-roberta-xl': 'https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json',
'facebook/xlm-roberta-xxl': 'https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig(OnnxConfig):
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
lowerCAmelCase__ : Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCAmelCase__ : Any = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
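# Sketch (added for illustration): the ONNX exporter consumes the mapping above,
# e.g. XLMRobertaXLOnnxConfig(config, task="multiple-choice").inputs yields
# OrderedDict([("input_ids", {0: "batch", 1: "choice", 2: "sequence"}), ...]).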
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import datasets
from .evaluate import evaluate
__SCREAMING_SNAKE_CASE : List[Any] = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
__SCREAMING_SNAKE_CASE : Any = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
__SCREAMING_SNAKE_CASE : Optional[Any] = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string" ),
"prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
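# Editor's note: with the _LazyModule above, heavy submodules are imported only on first
# attribute access. A minimal sketch of the effect (module path assumed):
#
#   from transformers.models.autoformer import AutoformerConfig  # cheap: config only
#   config = AutoformerConfig()  # modeling code (and torch) still untouched
#   # accessing AutoformerModel would trigger the torch-dependent import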
| 284
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_A = logging.get_logger(__name__)
class lowerCamelCase ( BaseImageProcessor ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['pixel_values']
def __init__(self , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = PILImageResampling.BICUBIC , _lowerCamelCase = True , _lowerCamelCase = 1 / 255 , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = True , **_lowerCamelCase , ):
"""simple docstring"""
super().__init__(**_lowerCamelCase )
UpperCAmelCase__ : List[Any] = size if size is not None else {"""height""": 384, """width""": 384}
UpperCAmelCase__ : Optional[int] = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = do_resize
UpperCAmelCase__ : Dict = size
UpperCAmelCase__ : Optional[Any] = resample
UpperCAmelCase__ : Optional[Any] = do_rescale
UpperCAmelCase__ : List[str] = rescale_factor
UpperCAmelCase__ : List[Any] = do_normalize
UpperCAmelCase__ : Dict = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCAmelCase__ : Union[str, Any] = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCAmelCase__ : Optional[int] = do_convert_rgb
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = PILImageResampling.BICUBIC , _lowerCamelCase = None , **_lowerCamelCase , ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
UpperCAmelCase__ : Dict = (size["""height"""], size["""width"""])
return resize(_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ):
"""simple docstring"""
return rescale(_lowerCamelCase , scale=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ):
"""simple docstring"""
return normalize(_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def _a (self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = ChannelDimension.FIRST , **_lowerCamelCase , ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : int = resample if resample is not None else self.resample
UpperCAmelCase__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : int = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : Any = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : Tuple = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCAmelCase__ : Optional[int] = size if size is not None else self.size
UpperCAmelCase__ : List[str] = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
UpperCAmelCase__ : Any = make_list_of_images(_lowerCamelCase )
if not valid_images(_lowerCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCAmelCase__ : Dict = [convert_to_rgb(_lowerCamelCase ) for image in images]
# All transformations expect numpy arrays.
UpperCAmelCase__ : Optional[Any] = [to_numpy_array(_lowerCamelCase ) for image in images]
if do_resize:
UpperCAmelCase__ : Union[str, Any] = [self.resize(image=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase ) for image in images]
if do_rescale:
UpperCAmelCase__ : List[str] = [self.rescale(image=_lowerCamelCase , scale=_lowerCamelCase ) for image in images]
if do_normalize:
UpperCAmelCase__ : Union[str, Any] = [self.normalize(image=_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase ) for image in images]
UpperCAmelCase__ : List[str] = [to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase ) for image in images]
UpperCAmelCase__ : Optional[Any] = BatchFeature(data={"""pixel_values""": images} , tensor_type=_lowerCamelCase )
return encoded_outputs
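# Illustrative usage sketch (editor's addition); the processor class above keeps its
# obfuscated name here, and its public name depends on the model family (e.g. BLIP):
#
#   import numpy as np
#   processor = lowerCamelCase(size={"height": 384, "width": 384})
#   image = np.random.randint(0, 256, (384, 384, 3), dtype=np.uint8)
#   batch = processor(images=image, return_tensors="np")
#   batch["pixel_values"].shape  # -> (1, 3, 384, 384)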
| 171
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
_A = logging.get_logger(__name__)
class lowerCamelCase ( BaseImageProcessor ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['pixel_values']
def __init__(self , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = PILImageResampling.BILINEAR , _lowerCamelCase = True , _lowerCamelCase = 1 / 255 , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = True , **_lowerCamelCase , ):
"""simple docstring"""
super().__init__(**_lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = size if size is not None else {"""shortest_edge""": 224}
UpperCAmelCase__ : List[Any] = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
UpperCAmelCase__ : str = crop_size if crop_size is not None else {"""height""": 256, """width""": 256}
UpperCAmelCase__ : str = get_size_dict(_lowerCamelCase , param_name="""crop_size""" )
UpperCAmelCase__ : int = do_resize
UpperCAmelCase__ : Any = size
UpperCAmelCase__ : int = resample
UpperCAmelCase__ : Union[str, Any] = do_rescale
UpperCAmelCase__ : Union[str, Any] = rescale_factor
UpperCAmelCase__ : str = do_center_crop
UpperCAmelCase__ : Dict = crop_size
UpperCAmelCase__ : List[str] = do_flip_channel_order
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = PIL.Image.BILINEAR , _lowerCamelCase = None , **_lowerCamelCase , ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
UpperCAmelCase__ : Union[str, Any] = get_resize_output_image_size(_lowerCamelCase , size=size["""shortest_edge"""] , default_to_square=_lowerCamelCase )
return resize(_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = get_size_dict(_lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(_lowerCamelCase , size=(size["""height"""], size["""width"""]) , data_format=_lowerCamelCase , **_lowerCamelCase )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ):
"""simple docstring"""
return rescale(_lowerCamelCase , scale=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def _a (self , _lowerCamelCase , _lowerCamelCase = None ):
"""simple docstring"""
return flip_channel_order(_lowerCamelCase , data_format=_lowerCamelCase )
def _a (self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = ChannelDimension.FIRST , **_lowerCamelCase , ):
"""simple docstring"""
UpperCAmelCase__ : Any = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : List[str] = resample if resample is not None else self.resample
UpperCAmelCase__ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : int = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
UpperCAmelCase__ : List[str] = size if size is not None else self.size
UpperCAmelCase__ : Tuple = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
UpperCAmelCase__ : Optional[int] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : str = get_size_dict(_lowerCamelCase , param_name="""crop_size""" )
UpperCAmelCase__ : List[str] = make_list_of_images(_lowerCamelCase )
if not valid_images(_lowerCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
# All transformations expect numpy arrays.
UpperCAmelCase__ : Union[str, Any] = [to_numpy_array(_lowerCamelCase ) for image in images]
if do_resize:
UpperCAmelCase__ : Tuple = [self.resize(image=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase ) for image in images]
if do_center_crop:
UpperCAmelCase__ : Optional[Any] = [self.center_crop(image=_lowerCamelCase , size=_lowerCamelCase ) for image in images]
if do_rescale:
UpperCAmelCase__ : List[Any] = [self.rescale(image=_lowerCamelCase , scale=_lowerCamelCase ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
UpperCAmelCase__ : Any = [self.flip_channel_order(image=_lowerCamelCase ) for image in images]
UpperCAmelCase__ : int = [to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase ) for image in images]
UpperCAmelCase__ : Optional[Any] = {"""pixel_values""": images}
return BatchFeature(data=_lowerCamelCase , tensor_type=_lowerCamelCase )
def _a (self , _lowerCamelCase , _lowerCamelCase = None ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(_lowerCamelCase ):
UpperCAmelCase__ : Optional[int] = target_sizes.numpy()
UpperCAmelCase__ : Tuple = []
for idx in range(len(_lowerCamelCase ) ):
UpperCAmelCase__ : Union[str, Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=_lowerCamelCase )
UpperCAmelCase__ : str = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowerCamelCase )
else:
UpperCAmelCase__ : str = logits.argmax(dim=1 )
UpperCAmelCase__ : int = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
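# Illustrative usage sketch (editor's addition): the last method above (upstream name:
# post_process_semantic_segmentation) maps logits back to per-pixel label maps; `outputs`
# is assumed to come from a matching semantic-segmentation model:
#
#   maps = processor.post_process_semantic_segmentation(outputs, target_sizes=[(512, 512)])
#   maps[0].shape  # -> torch.Size([512, 512]), one class index per pixel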
| 171
| 1
|
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=14 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=0.02 , ) -> str:
A_ = parent
A_ = batch_size
A_ = seq_length
A_ = is_training
A_ = use_input_mask
A_ = use_token_type_ids
A_ = use_labels
A_ = vocab_size
A_ = hidden_size
A_ = rotary_dim
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = initializer_range
A_ = None
A_ = vocab_size - 1
A_ = vocab_size - 1
A_ = vocab_size - 1
def __A ( self ) -> Any:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length] )
A_ = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=_SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def __A ( self ) -> Any:
A_ = self.prepare_config_and_inputs()
A_ ,A_ ,A_ = config_and_inputs
A_ = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
A_ = 20
A_ = model_class_name(_SCREAMING_SNAKE_CASE )
A_ = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE )
A_ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
A_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
A_ = model(
input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
A_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
A_ = model(
input_ids[:, -1:] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , position_ids=_SCREAMING_SNAKE_CASE , )
A_ = model(_SCREAMING_SNAKE_CASE )
A_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
A_ = 20
A_ = model_class_name(_SCREAMING_SNAKE_CASE )
A_ = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
A_ = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE )
A_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
A_ = model(
input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
A_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
A_ = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
A_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
A_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
@require_flax
class __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowercase : List[Any] = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
__lowercase : Optional[int] = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def __A ( self ) -> Optional[Any]:
A_ = FlaxGPTJModelTester(self )
def __A ( self ) -> Tuple:
for model_class_name in self.all_model_classes:
A_ ,A_ ,A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> int:
for model_class_name in self.all_model_classes:
A_ ,A_ ,A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@tooslow
def __A ( self ) -> str:
A_ = GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''' )
A_ = tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )
A_ = FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' )
A_ = False
A_ = model.config.eos_token_id
A_ = jax.jit(model.generate )
A_ = jit_generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id ).sequences
A_ = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
A_ = [
'''Hello this is a long string of text.\n\nI\'m trying to get the text of the''',
'''Hey, I\'m a little late to the party. I\'m going to''',
]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@is_pt_flax_cross_test
def __A ( self ) -> Union[str, Any]:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
A_ = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
A_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
A_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ ,A_ = pt_inputs['''input_ids'''].shape
A_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
A_ = 0
A_ = 1
A_ = 0
A_ = 1
A_ = pt_model_class(_SCREAMING_SNAKE_CASE ).eval()
A_ = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
A_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _SCREAMING_SNAKE_CASE )
A_ = fx_state
with torch.no_grad():
A_ = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
A_ = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_SCREAMING_SNAKE_CASE )
A_ = model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
A_ = fx_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def __A ( self ) -> str:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
A_ = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
A_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
A_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ = pt_model_class(_SCREAMING_SNAKE_CASE ).eval()
A_ = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
A_ = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , fx_model.params )
A_ ,A_ = pt_inputs['''input_ids'''].shape
A_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
A_ = 0
A_ = 1
A_ = 0
A_ = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
A_ = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
A_ = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_SCREAMING_SNAKE_CASE )
A_ = pt_model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_flax=_SCREAMING_SNAKE_CASE )
with torch.no_grad():
A_ = pt_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def __A ( self ) -> Union[str, Any]:
for model_class_name in self.all_model_classes:
A_ = model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' )
A_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
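# Editor's note: the PT<->Flax round trip exercised by the two cross tests above boils
# down to the helpers imported at the top of this file:
#
#   fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
#   pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
#
# and the tests assert the last-token logits agree to within 4e-2 in both directions.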
| 18
|
'''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    # Heavy columns (images, audio, raw binary) get a smaller parquet row-group size.
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size
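# Illustrative example (editor's addition): how the helper above picks a batch size.
# With an Image column present, the smaller image-specific row-group size wins;
# with only plain values it returns None (i.e. keep the default batch size):
#
#   from datasets import Features, Image, Value
#   get_writer_batch_size(Features({"img": Image(), "label": Value("int64")}))
#   # -> config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS
#   get_writer_batch_size(Features({"label": Value("int64")}))  # -> None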
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__( self , path_or_paths , split = None , features = None , cache_dir = None , keep_in_memory = False , streaming = False , num_proc = None , **kwargs , ) -> None:
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
        self.builder = Parquet(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , hash=hash , **kwargs , )
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class ParquetDatasetWriter:
    def __init__( self , dataset , path_or_buf , batch_size = None , **parquet_writer_kwargs , ) -> None:
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features )
        self.parquet_writer_kwargs = parquet_writer_kwargs
    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with open(self.path_or_buf , '''wb+''' ) as buffer:
                written = self._write(file_obj=buffer , batch_size=batch_size , **self.parquet_writer_kwargs )
        else:
            written = self._write(file_obj=self.path_or_buf , batch_size=batch_size , **self.parquet_writer_kwargs )
        return written
    def _write(self , file_obj , batch_size , **parquet_writer_kwargs ) -> int:
        # Writes the dataset to `file_obj` in parquet format, one row group per `batch_size` rows; returns bytes written.
        written = 0
        _ = parquet_writer_kwargs.pop('''path_or_buf''' , None )
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj , schema=schema , **parquet_writer_kwargs )
        for offset in logging.tqdm(
            range(0 , len(self.dataset ) , batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ):
            batch = query_table(
                table=self.dataset._data , key=slice(offset , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
            writer.write_table(batch )
            written += batch.nbytes
        writer.close()
        return written
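# Illustrative usage sketch (editor's addition), assuming an in-memory Dataset:
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"a": [1, 2, 3]})
#   n_bytes = ParquetDatasetWriter(ds, "out.parquet").write()
#   ds_again = ParquetDatasetReader("out.parquet", split="train").read()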
| 18
| 1
|
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
# A mock response for an HTTP head request to emulate server down
lowerCamelCase_ = mock.Mock()
lowerCamelCase_ = 500
lowerCamelCase_ = {}
lowerCamelCase_ = HTTPError
lowerCamelCase_ = {}
# Download this model to make sure it's in the cache.
lowerCamelCase_ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=lowercase ) as mock_head:
lowerCamelCase_ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
            # This check ensures that the fake head request was indeed called
mock_head.assert_called()
@require_tokenizers
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
# A mock response for an HTTP head request to emulate server down
lowerCamelCase_ = mock.Mock()
lowerCamelCase_ = 500
lowerCamelCase_ = {}
lowerCamelCase_ = HTTPError
lowerCamelCase_ = {}
# Download this model to make sure it's in the cache.
lowerCamelCase_ = GPTaTokenizerFast.from_pretrained("gpt2" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=lowercase ) as mock_head:
lowerCamelCase_ = GPTaTokenizerFast.from_pretrained("gpt2" )
            # This check ensures that the fake head request was indeed called
mock_head.assert_called()
def SCREAMING_SNAKE_CASE_( self ) -> Any:
# This test is for deprecated behavior and can be removed in v5
try:
lowerCamelCase_ = tempfile.mktemp()
with open(lowercase , "wb" ) as f:
http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" , lowercase )
lowerCamelCase_ = AlbertTokenizer.from_pretrained(lowercase )
finally:
os.remove(lowercase )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("tokenizer.json" , "wb" ) as f:
http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" , lowercase )
lowerCamelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
            # The tiny random BERT has a vocab size of 1024, tiny GPT-2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json" )
def SCREAMING_SNAKE_CASE_( self ) -> int:
# This test is for deprecated behavior and can be removed in v5
lowerCamelCase_ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
lowerCAmelCase__ = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def SCREAMING_SNAKE_CASE_( cls ) -> Tuple:
lowerCamelCase_ = TOKEN
HfFolder.save_token(lowercase )
@classmethod
def SCREAMING_SNAKE_CASE_( cls ) -> Dict:
try:
delete_repo(token=cls._token , repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase_ = os.path.join(lowercase , "vocab.txt" )
with open(lowercase , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
lowerCamelCase_ = BertTokenizer(lowercase )
tokenizer.push_to_hub("test-tokenizer" , use_auth_token=self._token )
lowerCamelCase_ = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="test-tokenizer" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase , repo_id="test-tokenizer" , push_to_hub=lowercase , use_auth_token=self._token )
lowerCamelCase_ = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase_ = os.path.join(lowercase , "vocab.txt" )
with open(lowercase , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
lowerCamelCase_ = BertTokenizer(lowercase )
tokenizer.push_to_hub("valid_org/test-tokenizer-org" , use_auth_token=self._token )
lowerCamelCase_ = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-tokenizer-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
lowercase , repo_id="valid_org/test-tokenizer-org" , push_to_hub=lowercase , use_auth_token=self._token )
lowerCamelCase_ = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def SCREAMING_SNAKE_CASE_( self ) -> str:
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase_ = os.path.join(lowercase , "vocab.txt" )
with open(lowercase , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
lowerCamelCase_ = CustomTokenizer(lowercase )
# No fast custom tokenizer
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
lowerCamelCase_ = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' , trust_remote_code=lowercase )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase_ = os.path.join(lowercase , "vocab.txt" )
with open(lowercase , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
lowerCamelCase_ = BertTokenizerFast.from_pretrained(lowercase )
bert_tokenizer.save_pretrained(lowercase )
lowerCamelCase_ = CustomTokenizerFast.from_pretrained(lowercase )
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
lowerCamelCase_ = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' , trust_remote_code=lowercase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizerFast" )
lowerCamelCase_ = AutoTokenizer.from_pretrained(
f'{USER}/test-dynamic-tokenizer' , use_fast=lowercase , trust_remote_code=lowercase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ = Trie()
trie.add("Hello 友達" )
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
trie.add("Hello" )
trie.data
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS]", " This is a ", "extra_id_100"] )
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ) , ["A", "BC"] )
self.assertEqual(trie.split("BCA" ) , ["BC", "A"] )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ) , ["AB", "C"] )
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ) , ["ABC", "D"] )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
# Even if the offsets are wrong, we necessarily output correct string
# parts.
lowerCamelCase_ = Trie()
lowerCamelCase_ = trie.cut_text("ABC" , [0, 0, 2, 1, 2, 3] )
self.assertEqual(lowercase , ["AB", "C"] )
| 19
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.qkv_bias = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]
    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase_ = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
lowerCamelCase_ = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase_ = in_proj_weight[: config.hidden_size, :]
lowerCamelCase_ = in_proj_bias[: config.hidden_size]
lowerCamelCase_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase_ = in_proj_weight[-config.hidden_size :, :]
lowerCamelCase_ = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone" , "vit" )
    if "cls_token" in name:
        name = name.replace("cls_token" , "embeddings.cls_token" )
    if "det_token" in name:
        name = name.replace("det_token" , "embeddings.detection_tokens" )
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
    if "pos_embed" in name:
        name = name.replace("pos_embed" , "embeddings.position_embeddings" )
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "blocks" in name:
        name = name.replace("blocks" , "encoder.layer" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name:
        name = name.replace("attn" , "attention.self" )
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    if "class_embed" in name:
        name = name.replace("class_embed" , "class_labels_classifier" )
    if "bbox_embed" in name:
        name = name.replace("bbox_embed" , "bbox_predictor" )
    if "vit.norm" in name:
        name = name.replace("vit.norm" , "vit.layernorm" )
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split("." )
            layer_num = int(key_split[2] )
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f'vit.encoder.layer.{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                orig_state_dict[f'vit.encoder.layer.{layer_num}.attention.attention.key.weight'] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'vit.encoder.layer.{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'vit.encoder.layer.{layer_num}.attention.attention.query.bias'] = val[:dim]
                orig_state_dict[f'vit.encoder.layer.{layer_num}.attention.attention.key.bias'] = val[dim : dim * 2]
                orig_state_dict[f'vit.encoder.layer.{layer_num}.attention.attention.value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub = False ):
    config = get_yolos_config(yolos_name )
    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
    # load 🤗 model
    model = YolosForObjectDetection(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection" , size=size )
    encoding = image_processor(images=prepare_img() , return_tensors="pt" )
    outputs = model(**encoding )
    logits , pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits , expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
    else:
        raise ValueError(f'Unknown yolos_name: {yolos_name}' )
    assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1e-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'Saving model {yolos_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub..." )
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name , organization="hustvl" )
        model.push_to_hub(model_name , organization="hustvl" )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__A =parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
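# Illustrative invocation (editor's addition); the script filename and paths below are
# placeholders, not part of the original repository:
#
#   python convert_yolos_to_pytorch.py \
#       --yolos_name yolos_s_200_pre \
#       --checkpoint_path /path/to/yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path ./yolos-small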
| 19
| 1
|
"""simple docstring"""
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    height, width = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(height):
        for j in range(width):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
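# Editor's note: NumPy broadcasting gives an equivalent, loop-free negative.
# A minimal sketch (same result for uint8 RGB images):
#
#   def convert_to_negative_fast(img):
#       return 255 - img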
if __name__ == "__main__":
# read original image
UpperCAmelCase = imread('''image_data/lena.jpg''', 1)
# convert to its negative
UpperCAmelCase = convert_to_negative(img)
# show result image
imshow('''negative of original image''', img)
waitKey(0)
destroyAllWindows()
| 353
|
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
RESOURCE_FILES_NAMES = {
'''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
'''vocab_file''': '''vocab.txt''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
},
'''sentencepiece_model_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''ernie-m-base''': 514,
'''ernie-m-large''': 514,
}
PRETRAINED_INIT_CONFIGURATION = {
'''ernie-m-base''': {'''do_lower_case''': False},
'''ernie-m-large''': {'''do_lower_case''': False},
}
class __magic_name__ ( PreTrainedTokenizer ):
    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
    def __init__( self , sentencepiece_model_ckpt , vocab_file=None , do_lower_case=False , encoding='''utf8''' , unk_token='''[UNK]''' , sep_token='''[SEP]''' , pad_token='''[PAD]''' , cls_token='''[CLS]''' , mask_token='''[MASK]''' , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , vocab_file=vocab_file , encoding=encoding , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(sentencepiece_model_ckpt )
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file )
        else:
            self.vocab = {self.sp_model.id_to_piece(idx ): idx for idx in range(self.sp_model.get_piece_size() )}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping( self , text ):
        '''simple docstring'''
        if text is None:
            return None
        split_tokens = self.tokenize(text )
        normalized_text, char_mapping = '''''', []
        for i, ch in enumerate(text ):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch )
            else:
                ch = unicodedata.normalize('''NFKC''' , ch )
            if self.is_whitespace(ch ):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch ) )
        text, token_mapping, offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token ) + offset
            end = start + len(token )
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
            offset = end
        return token_mapping
@property
def __snake_case ( self : List[Any] ):
'''simple docstring'''
return len(self.vocab )
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.sentencepiece_model_ckpt )
def __snake_case ( self : int , snake_case__ : List[Any] ):
'''simple docstring'''
return "".join((self.SP_CHAR_MAPPING.get(snake_case__ , snake_case__ ) for c in text) )
def __snake_case ( self : List[str] , snake_case__ : Optional[int] , snake_case__ : int=False , snake_case__ : Dict=6_4 , snake_case__ : Any=0.1 ):
'''simple docstring'''
if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
lowercase :Any = True
if self.sp_model_kwargs.get('''alpha''' ) is not None:
lowercase :Any = self.sp_model_kwargs.get('''alpha''' )
if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
lowercase :Optional[Any] = self.sp_model_kwargs.get('''nbest_size''' )
if not enable_sampling:
lowercase :Any = self.sp_model.EncodeAsPieces(snake_case__ )
else:
lowercase :List[Any] = self.sp_model.SampleEncodeAsPieces(snake_case__ , snake_case__ , snake_case__ )
lowercase :str = []
for pi, piece in enumerate(snake_case__ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(snake_case__ ) and pi != 0:
new_pieces.append(snake_case__ )
continue
else:
continue
lowercase :int = 0
for i, chunk in enumerate(snake_case__ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(snake_case__ ) or self.is_punct(snake_case__ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(snake_case__ )
lowercase :Optional[int] = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase :str = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase :Dict = i
if len(snake_case__ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
    def convert_ids_to_string( self , ids ):
        '''simple docstring'''
        tokens = self.convert_ids_to_tokens(ids )
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
def __snake_case ( self : int , snake_case__ : Union[str, Any] ):
'''simple docstring'''
return self.vocab.get(snake_case__ , self.vocab.get(self.unk_token ) )
def __snake_case ( self : List[Any] , snake_case__ : List[str] ):
'''simple docstring'''
return self.reverse_vocab.get(snake_case__ , self.unk_token )
def __snake_case ( self : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any=None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase :int = [self.cls_token_id]
lowercase :str = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def __snake_case ( self : Any , snake_case__ : Dict , snake_case__ : str=None ):
'''simple docstring'''
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def __snake_case ( self : List[Any] , snake_case__ : Optional[int] , snake_case__ : Any=None , snake_case__ : Optional[int]=False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1]
def __snake_case ( self : List[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
# [CLS] X [SEP]
return (len(snake_case__ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(snake_case__ ) + 1) + [1] * (len(snake_case__ ) + 3)
    def is_ch_char( self , char ):
'''simple docstring'''
if "\u4e00" <= char <= "\u9fff":
return True
return False
def __snake_case ( self : List[str] , snake_case__ : Any ):
'''simple docstring'''
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
    def is_punct( self , char ):
'''simple docstring'''
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
    def is_whitespace( self , char ):
        '''simple docstring'''
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char ) == 1:
            cat = unicodedata.category(char )
            if cat == "Zs":
                return True
        return False
    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
'''simple docstring'''
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """Return the fixed monthly payment (EMI) for a loan."""
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
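# Worked example (figures ours, for illustration): borrowing 25_000 at an 8% annual
# rate over 5 years gives rate_per_month = 0.08 / 12 and number_of_payments = 60,
# so equated_monthly_installments(25_000, 0.08, 5) is about 506.91 per month.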
if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
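# With this pattern, `NllbTokenizer` is only imported when it is first accessed; if
# sentencepiece (or tokenizers, for the fast variant) is not installed, the name is
# simply left out of the module instead of failing at import time.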
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}


class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" MobileBERT tokenizer backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
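# Minimal usage sketch (assumes hub access; not part of the original file):
#   tokenizer = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#   tokenizer("hello world")  # wraps the WordPiece ids in [CLS] ... [SEP]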
'''simple docstring'''
def is_contains_unique_chars(input_str: str) -> bool:
    """Check whether all characters in input_str are distinct, using a bitmap of code points."""
    # Each bit of this arbitrary-precision integer represents one unicode code point
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
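# Example (ours): in "abca" the bit for ord("a") is already set when the second "a"
# arrives, so the function returns False; "abc" returns True. Because the bitmap is
# an arbitrary-precision int, exotic characters simply set higher bits.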
if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1_408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1_536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1_792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2_304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2_560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1_000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
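# For instance (values from CONFIG_MAP above), get_efficientnet_config("b0") yields a
# config with hidden_dim=1280, width/depth coefficients of 1.0, a 224x224 input size
# and a 1000-class ImageNet head.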
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            # TF conv kernels are (H, W, in, out); PyTorch expects (out, in, H, W)
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            # depthwise kernels are (H, W, in, multiplier) -> (in, multiplier, H, W)
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True, weights="imagenet", input_tensor=None, input_shape=None, pooling=None, classes=1_000, classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="b0",
        type=str,
        help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="hf_model",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")

    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
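# slerp interpolates along the great circle between v0 and v1; when the inputs are
# nearly parallel (|dot| > DOT_THRESHOLD) it falls back to plain linear interpolation,
# avoiding a division by sin(theta) ~ 0.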
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
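# For unit vectors, ||x - y|| = 2 * sin(theta / 2), so the expression above equals
# theta**2 / 2 where theta is the angle between the two embeddings: a smooth,
# spherical analogue of a squared-error loss.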
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
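    # For example, strength=0.6 with num_inference_steps=50 gives init_timestep=30 and
    # t_start=20, so denoising runs over the last 30 scheduler steps: higher strength
    # re-noises the input more and departs further from it.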
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")
        image = image.to(device=device, dtype=dtype)
        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)
        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(self, latents, timestep, index, text_embeddings, noise_pred_original, original_image_embeddings_clip, clip_guidance_scale):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
    @torch.no_grad()
    def __call__(
        self,
        style_image: Union[torch.FloatTensor, PIL.Image.Image],
        content_image: Union[torch.FloatTensor, PIL.Image.Image],
        style_prompt: Optional[str] = None,
        content_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
'''simple docstring'''
if isinstance(lowercase , lowercase ) and len(lowercase ) != batch_size:
raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(lowercase )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(lowercase , torch.Generator ) and batch_size > 1:
_snake_case = [generator] + [None] * (batch_size - 1)
_snake_case = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
_snake_case = [x[0] for x in coca_is_none if x[1]]
_snake_case = ', '.join(lowercase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(lowercase ):
raise ValueError(
f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
f'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
_snake_case = self.get_image_description(lowercase )
if style_prompt is None:
if len(lowercase ):
raise ValueError(
f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
_snake_case = self.get_image_description(lowercase )
# get prompt text embeddings for content and style
_snake_case = self.tokenizer(
lowercase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=lowercase , return_tensors='pt' , )
_snake_case = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_snake_case = self.tokenizer(
lowercase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=lowercase , return_tensors='pt' , )
_snake_case = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_snake_case = slerp(lowercase , lowercase , lowercase )
# duplicate text embeddings for each generation per prompt
_snake_case = text_embeddings.repeat_interleave(lowercase , dim=0 )
# set timesteps
_snake_case = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_snake_case = {}
if accepts_offset:
_snake_case = 1
self.scheduler.set_timesteps(lowercase , **lowercase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_snake_case , _snake_case = self.get_timesteps(lowercase , lowercase , self.device )
_snake_case = timesteps[:1].repeat(lowercase )
# Preprocess image
_snake_case = preprocess(lowercase , lowercase , lowercase )
_snake_case = self.prepare_latents(
lowercase , lowercase , lowercase , text_embeddings.dtype , self.device , lowercase )
_snake_case = preprocess(lowercase , lowercase , lowercase )
_snake_case = self.prepare_latents(
lowercase , lowercase , lowercase , text_embeddings.dtype , self.device , lowercase )
_snake_case = slerp(lowercase , lowercase , lowercase )
if clip_guidance_scale > 0:
_snake_case = self.get_clip_image_embeddings(lowercase , lowercase )
_snake_case = self.get_clip_image_embeddings(lowercase , lowercase )
_snake_case = slerp(
lowercase , lowercase , lowercase )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_snake_case = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_snake_case = content_text_input.input_ids.shape[-1]
_snake_case = self.tokenizer([''] , padding='max_length' , max_length=lowercase , return_tensors='pt' )
_snake_case = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_snake_case = uncond_embeddings.repeat_interleave(lowercase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_snake_case = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_snake_case = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_snake_case = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_snake_case = torch.randn(lowercase , generator=lowercase , device='cpu' , dtype=lowercase ).to(
self.device )
else:
_snake_case = torch.randn(lowercase , generator=lowercase , device=self.device , dtype=lowercase )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
_snake_case = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_snake_case = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_snake_case = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_snake_case = {}
if accepts_eta:
_snake_case = eta
# check if the scheduler accepts generator
_snake_case = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_snake_case = generator
with self.progress_bar(total=lowercase ):
for i, t in enumerate(lowercase ):
# expand the latents if we are doing classifier free guidance
_snake_case = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_snake_case = self.scheduler.scale_model_input(lowercase , lowercase )
# predict the noise residual
_snake_case = self.unet(lowercase , lowercase , encoder_hidden_states=lowercase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_snake_case , _snake_case = noise_pred.chunk(2 )
_snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_snake_case = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_snake_case , _snake_case = self.cond_fn(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , )
# compute the previous noisy sample x_t -> x_t-1
_snake_case = self.scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_snake_case = 1 / 0.18215 * latents
_snake_case = self.vae.decode(lowercase ).sample
_snake_case = (image / 2 + 0.5).clamp(0 , 1 )
_snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_snake_case = self.numpy_to_pil(lowercase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=lowercase , nsfw_content_detected=lowercase )
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, do_resize=True, size=None, size_divisor=32, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, do_center_crop=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], do_pad=True, batch_size=7, min_resolution=30, max_resolution=400, num_channels=3):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to
        BridgeTowerImageProcessor, assuming do_resize is set to True with a scalar
        size and size_divisor.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
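    # The int(1333 / 800 * size) cap mirrors the processor's longest-edge limit: after
    # scaling the shortest edge to `size`, an overly long other edge shrinks both sides,
    # and the result is snapped down to multiples of size_divisor.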
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range)

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    # Prepares a batch of 2 sequences (25 tokens each) for a LayoutLM forward pass.
    # fmt: off
lowercase : Any = tf.convert_to_tensor([[1_01,10_19,10_14,10_16,10_37,1_28_49,47_47,10_04,1_42_46,22_78,54_39,45_24,50_02,29_30,21_93,29_30,43_41,32_08,10_05,10_55,21_71,28_48,1_13_00,35_31,1_02],[1_01,40_70,40_34,70_20,10_24,30_58,10_15,10_13,28_61,10_13,60_70,1_92_74,27_72,62_05,2_78_14,1_61_47,1_61_47,43_43,20_47,1_02_83,1_09_69,1_43_89,10_12,23_38,1_02]] ) # noqa: E231
lowercase : List[Any] = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
lowercase : List[str] = tf.convert_to_tensor([[[0,0,0,0],[4_23,2_37,4_40,2_51],[4_27,2_72,4_41,2_87],[4_19,1_15,4_37,1_29],[9_61,8_85,9_92,9_12],[2_56,38,3_30,58],[2_56,38,3_30,58],[3_36,42,3_53,57],[3_60,39,4_01,56],[3_60,39,4_01,56],[4_11,39,4_71,59],[4_79,41,5_28,59],[5_33,39,6_30,60],[67,1_13,1_34,1_31],[1_41,1_15,2_09,1_32],[68,1_49,1_33,1_66],[1_41,1_49,1_87,1_64],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[2_95,1_48,3_49,1_65],[4_41,1_49,4_92,1_66],[4_97,1_49,5_46,1_64],[64,2_01,1_25,2_18],[10_00,10_00,10_00,10_00]],[[0,0,0,0],[6_62,1_50,7_54,1_66],[6_65,1_99,7_42,2_11],[5_19,2_13,5_54,2_28],[5_19,2_13,5_54,2_28],[1_34,4_33,1_87,4_54],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[3_14,4_69,3_76,4_82],[5_04,6_84,5_82,7_06],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[6_10,7_49,6_52,7_65],[1_30,6_59,1_68,6_72],[1_76,6_57,2_37,6_72],[2_38,6_57,3_12,6_72],[4_43,6_53,6_28,6_72],[4_43,6_53,6_28,6_72],[7_16,3_01,8_25,3_17],[10_00,10_00,10_00,10_00]]] ) # noqa: E231
lowercase : Dict = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
lowercase : List[Any] = tf.convert_to_tensor([[-1_00,10,10,10,9,1,-1_00,7,7,-1_00,7,7,4,2,5,2,8,8,-1_00,-1_00,5,0,3,2,-1_00],[-1_00,12,12,12,-1_00,12,10,-1_00,-1_00,-1_00,-1_00,10,12,9,-1_00,-1_00,-1_00,10,10,10,9,12,-1_00,10,-1_00]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_forward_pass_no_head(self ):
        model = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' )
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids )
        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]], )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3 ) )
        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] )
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3 ) )
    @slow
    def test_forward_pass_sequence_classification(self ):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased', num_labels=2 )
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=tf.convert_to_tensor([1, 1] ), )
        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape )
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape )
    @slow
    def test_forward_pass_token_classification(self ):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased', num_labels=13 )
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels )
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13) )
        self.assertEqual(logits.shape, expected_shape )
    @slow
    def test_forward_pass_question_answering(self ):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' )
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids )
        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25) )
        self.assertEqual(outputs.start_logits.shape, expected_shape )
        self.assertEqual(outputs.end_logits.shape, expected_shape )
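
# Illustrative addition (not part of the original tests): a minimal sketch of how
# the start/end logits asserted above are usually decoded into an answer span.
# The helper name is hypothetical; it relies on the module-level `tf` import.
def _decode_qa_span_sketch(start_logits, end_logits, input_ids):
    # pick the most likely start and end positions for the first example
    start_index = int(tf.argmax(start_logits[0] ) )
    end_index = int(tf.argmax(end_logits[0] ) )
    # the answer is the inclusive token span [start_index, end_index]
    return input_ids[0, start_index : end_index + 1]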
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""
        @staticmethod
        def open(*args ,**kwargs ):
            '''simple docstring'''
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase ):
    """simple docstring"""
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self ,model ,tokenizer ,processor ):
        '''simple docstring'''
        object_detector = ObjectDetectionPipeline(model=model ,image_processor=processor )
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test(self ,object_detector ,examples ):
        '''simple docstring'''
        outputs = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png' ,threshold=0.0 )
        self.assertGreater(len(outputs ) ,0 )
        for detected_object in outputs:
            self.assertEqual(
                detected_object ,{
                    'score': ANY(float ),
                    'label': ANY(str ),
                    'box': {'xmin': ANY(int ), 'ymin': ANY(int ), 'xmax': ANY(int ), 'ymax': ANY(int )},
                } ,)
        import datasets
        dataset = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' ,'image' ,split='test' )
        batch = [
            Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
            'http://images.cocodataset.org/val2017/000000039769.jpg',
            # RGBA
            dataset[0]['file'],
            # LA
            dataset[1]['file'],
            # L
            dataset[2]['file'],
        ]
        batch_outputs = object_detector(batch ,threshold=0.0 )
        self.assertEqual(len(batch ) ,len(batch_outputs ) )
        for outputs in batch_outputs:
            self.assertGreater(len(outputs ) ,0 )
            for detected_object in outputs:
                self.assertEqual(
                    detected_object ,{
                        'score': ANY(float ),
                        'label': ANY(str ),
                        'box': {'xmin': ANY(int ), 'ymin': ANY(int ), 'xmax': ANY(int ), 'ymax': ANY(int )},
                    } ,)
@require_tf
@unittest.skip('Object detection not implemented in TF' )
    def test_small_model_tf(self ):
        '''simple docstring'''
        pass
@require_torch
    def test_small_model_pt(self ):
        '''simple docstring'''
        model_id = 'hf-internal-testing/tiny-detr-mobilenetsv3'
        model = AutoModelForObjectDetection.from_pretrained(model_id )
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id )
        object_detector = ObjectDetectionPipeline(model=model ,feature_extractor=feature_extractor )
        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' ,threshold=0.0 )
        self.assertEqual(
            nested_simplify(outputs ,decimals=4 ) ,[
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
] ,)
        outputs = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] ,threshold=0.0 ,)
self.assertEqual(
            nested_simplify(outputs ,decimals=4 ) ,[
[
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
[
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
] ,)
@require_torch
@slow
    def test_large_model_pt(self ):
        '''simple docstring'''
        model_id = 'facebook/detr-resnet-50'
        model = AutoModelForObjectDetection.from_pretrained(model_id )
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id )
        object_detector = ObjectDetectionPipeline(model=model ,feature_extractor=feature_extractor )
        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
        self.assertEqual(
            nested_simplify(outputs ,decimals=4 ) ,[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] ,)
        outputs = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
            nested_simplify(outputs ,decimals=4 ) ,[
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
] ,)
@require_torch
@slow
    def test_integration_torch_object_detection(self ):
        '''simple docstring'''
        model_id = 'facebook/detr-resnet-50'
        object_detector = pipeline('object-detection' ,model=model_id )
        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
        self.assertEqual(
            nested_simplify(outputs ,decimals=4 ) ,[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] ,)
        outputs = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
            nested_simplify(outputs ,decimals=4 ) ,[
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
] ,)
@require_torch
@slow
    def test_threshold(self ):
        '''simple docstring'''
        threshold = 0.9985
        model_id = 'facebook/detr-resnet-50'
        object_detector = pipeline('object-detection' ,model=model_id )
        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' ,threshold=threshold )
        self.assertEqual(
            nested_simplify(outputs ,decimals=4 ) ,[
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] ,)
@require_torch
@require_pytesseract
@slow
    def test_layoutlm(self ):
        '''simple docstring'''
        model_id = 'Narsil/layoutlmv3-finetuned-funsd'
        threshold = 0.9993
        object_detector = pipeline('object-detection' ,model=model_id ,threshold=threshold )
        outputs = object_detector(
            'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
        self.assertEqual(
            nested_simplify(outputs ,decimals=4 ) ,[
{'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
{'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
] ,)
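
# Illustrative addition (not part of the original tests): boxes returned by the
# pipeline are dicts of pixel coordinates. A common follow-up when comparing
# detections is intersection-over-union; this hypothetical helper shows how.
def _box_iou_sketch(box_a, box_b):
    # overlap rectangle
    x_min = max(box_a['xmin'], box_b['xmin'])
    y_min = max(box_a['ymin'], box_b['ymin'])
    x_max = min(box_a['xmax'], box_b['xmax'])
    y_max = min(box_a['ymax'], box_b['ymax'])
    intersection = max(0, x_max - x_min) * max(0, y_max - y_min)
    area_a = (box_a['xmax'] - box_a['xmin']) * (box_a['ymax'] - box_a['ymin'])
    area_b = (box_b['xmax'] - box_b['xmin']) * (box_b['ymax'] - box_b['ymin'])
    union = area_a + area_b - intersection
    return intersection / union if union else 0.0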
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester :
"""simple docstring"""
    def __init__( self ,parent ,batch_size=13 ,image_size=30 ,patch_size=2 ,num_channels=3 ,is_training=True ,use_labels=True ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,type_sequence_label_size=10 ,initializer_range=0.02 ,scope=None ,):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self ):
        '''simple docstring'''
        return ViTMSNConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,)
    def create_and_check_model(self ,config ,pixel_values ,labels ):
        '''simple docstring'''
        model = ViTMSNModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_image_classification(self ,config ,pixel_values ,labels ):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values ,labels=labels )
        print(f'Pixel and labels shape: {pixel_values.shape}, {labels.shape}' )
        print(f'Labels: {labels}' )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': ViTMSNModel, 'image-classification': ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = ViTMSNModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=ViTMSNConfig ,has_text_modality=False ,hidden_size=37 )
    def test_config(self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds' )
    def test_inputs_embeds(self ):
        '''simple docstring'''
        pass
    def test_model_common_attributes(self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x ,nn.Linear ) )
    def test_forward_signature(self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,expected_arg_names )
    def test_model(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained(self ):
        '''simple docstring'''
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase ):
    """simple docstring"""
    @cached_property
    def default_image_processor(self ):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self ):
        '''simple docstring'''
        torch.manual_seed(2 )
        model = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image ,return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape ,expected_shape )
        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,expected_slice ,atol=1E-4 ) )
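
# Illustrative addition (not part of the original test): once the logits above
# are verified, the predicted ImageNet class is typically read off with an
# argmax through the model's id2label mapping.
def _predicted_label_sketch(model, logits):
    predicted_class_idx = logits.argmax(-1 ).item()
    return model.config.id2label[predicted_class_idx]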
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class a__ ( TestCasePlus ):
"""simple docstring"""
@slow
@require_torch
    def test_finetune_bert2bert( self ):
        '''simple docstring'''
        bertabert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased" )
        bertabert.config.vocab_size = bertabert.config.encoder.vocab_size
        bertabert.config.eos_token_id = tokenizer.sep_token_id
        bertabert.config.decoder_start_token_id = tokenizer.cls_token_id
        bertabert.config.max_length = 128
        train_dataset = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
        val_dataset = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
        train_dataset = train_dataset.select(range(32 ) )
        val_dataset = val_dataset.select(range(16 ) )
        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"] , padding="max_length" , truncation=True , max_length=512 )
            outputs = tokenizer(batch["highlights"] , padding="max_length" , truncation=True , max_length=128 )
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask
            assert all(len(x ) == 512 for x in inputs.input_ids )
            assert all(len(x ) == 128 for x in outputs.input_ids )
            return batch
        def _compute_metrics(pred ):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids , skip_special_tokens=True )
            label_str = tokenizer.batch_decode(labels_ids , skip_special_tokens=True )
            accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["article", "highlights"] , )
        train_dataset.set_format(
            type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["article", "highlights"] , )
        val_dataset.set_format(
            type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir , per_device_train_batch_size=batch_size , per_device_eval_batch_size=batch_size , predict_with_generate=True , evaluation_strategy="steps" , do_train=True , do_eval=True , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bertabert , args=training_args , compute_metrics=_compute_metrics , train_dataset=train_dataset , eval_dataset=val_dataset , tokenizer=tokenizer , )
        # start training
        trainer.train()
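
        # Illustrative addition (not part of the original test): the -100
        # substitution in _map_to_encoder_decoder_inputs above works because
        # PyTorch's cross-entropy loss skips targets equal to its default
        # ignore_index of -100, so padding positions contribute nothing:
        #
        #   import torch
        #   logits = torch.randn(3, 5)            # 3 positions, vocabulary of 5
        #   labels = torch.tensor([2, -100, 4])   # middle position is padding
        #   torch.nn.functional.cross_entropy(logits, labels)  # averages over 2 positions only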
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotImageClassificationPipeline(Pipeline ):
    def __init__( self , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        requires_backends(self , "vision" )
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__( self , images , **kwargs ):
        '''simple docstring'''
        return super().__call__(images , **kwargs )
    def _sanitize_parameters( self , **kwargs ):
        '''simple docstring'''
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess( self , image , candidate_labels=None , hypothesis_template="This is a photo of {}." ):
        '''simple docstring'''
        image = load_image(image )
        inputs = self.image_processor(images=[image] , return_tensors=self.framework )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward( self , model_inputs ):
        '''simple docstring'''
        candidate_labels = model_inputs.pop("candidate_labels" )
        text_inputs = model_inputs.pop("text_inputs" )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess( self , model_outputs ):
        '''simple docstring'''
        candidate_labels = model_outputs.pop("candidate_labels" )
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores , list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F"Unsupported framework: {self.framework}" )
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
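
# Illustrative addition (not part of the original module): a typical use of the
# pipeline defined above; the CLIP checkpoint name is only an example.
def _zero_shot_usage_sketch():
    from transformers import pipeline

    classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    # returns a list of {"score": ..., "label": ...} dicts sorted by descending score
    return classifier(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "dog", "remote control"],
    )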
from __future__ import annotations
def allocation_num(number_of_bytes: int , partitions: int ) -> list[str]:
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!" )
    if partitions > number_of_bytes:
        raise ValueError("partitions can not be greater than number_of_bytes!" )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}" )
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
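
# Illustrative addition (not in the original file): a quick worked example.
# Splitting 100 bytes across 3 partitions gives two 33-byte ranges plus a
# final range that absorbs the remainder:
#
#   allocation_num(100, 3) -> ['1-33', '34-66', '67-100']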
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
_lowerCAmelCase : Tuple = logging.get_logger('''transformers.models.speecht5''')
_lowerCAmelCase : int = {
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
_lowerCAmelCase : str = {
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
_lowerCAmelCase : int = {
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
_lowerCAmelCase : Union[str, Any] = {
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
_lowerCAmelCase : Union[str, Any] = {
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
_lowerCAmelCase : int = {
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
_lowerCAmelCase : Any = {
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
_lowerCAmelCase : List[str] = {
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
_lowerCAmelCase : Optional[Any] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
_lowerCAmelCase : Dict = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
_lowerCAmelCase : Union[str, Any] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : Tuple = [
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
_lowerCAmelCase : Tuple = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
_lowerCAmelCase : int = IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
_lowerCAmelCase : Optional[int] = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." )
def should_ignore(name , ignore_keys ):
    for key in ignore_keys:
        if key.endswith(".*" ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*." )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
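
# Illustrative addition (not in the original script): how the wildcard patterns
# in the IGNORE_KEYS lists above are matched by should_ignore.
def _should_ignore_examples():
    # a trailing ".*" matches any key under that prefix
    assert should_ignore("text_encoder_prenet.embed_tokens.weight", ["text_encoder_prenet.*"])
    # an interior ".*." matches keys whose prefix and suffix straddle a layer index
    assert should_ignore("encoder.layers.3.norm_k.weight", ["encoder.layers.*.norm_k.weight"])
    assert not should_ignore("encoder.layer_norm.weight", ["decoder.*"])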
def recursively_load_weights(fairseq_dict , hf_model , task ):
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}" )
    for name, value in fairseq_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(f"{name} was ignored" )
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_encoder , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*." )
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"Unused weights: {unused_weights}" )
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_speechta_checkpoint(task , checkpoint_path , pytorch_dump_folder_path , config_path=None , vocab_path=None , repo_id=None , ):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path )
    else:
        config = SpeechT5Config()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config )
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config )
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config )
    else:
        raise ValueError(f"Unknown task name: {task}" )
    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path , model_max_length=config.max_text_positions )
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>" , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token} )
        tokenizer.add_tokens(["<ctc_blank>"] )
    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer , feature_extractor=feature_extractor )
    processor.save_pretrained(pytorch_dump_folder_path )
    fairseq_checkpoint = torch.load(checkpoint_path )
    recursively_load_weights(fairseq_checkpoint["model"] , model , task )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("Pushing to the hub..." )
        processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
_lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
_lowerCAmelCase : Tuple = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
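
# Illustrative addition (not part of the original script): an example invocation
# with placeholder paths (the script filename is whatever this file is saved as):
#
#   python convert_speecht5_checkpoint.py --task s2t \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --vocab_path /path/to/spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_s2t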
from collections import deque
def tarjan(g ):
    """simple docstring"""
    n = len(g )
    stack = deque()
    on_stack = [False for _ in range(n )]
    index_of = [-1 for _ in range(n )]
    lowlink_of = index_of[:]
    def strong_connect(v , index , components ):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v )
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w , index , components )
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w )
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w )
            components.append(component )
        return index
    components = []
    for v in range(n ):
        if index_of[v] == -1:
            strong_connect(v , 0 , components )
    return components
def create_graph(n , edges ):
    """simple docstring"""
    g = [[] for _ in range(n )]
    for u, v in edges:
        g[u].append(v )
    return g
if __name__ == "__main__":
# Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
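
    # Illustrative note (added): Tarjan's algorithm reports components in
    # reverse topological order of the condensation graph, so the sink
    # component [5] comes first and the source component [3, 2, 1, 0]
    # (the cycle 0 -> 1 -> 2 -> 0 together with 3) comes last.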
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def config(*args , **kwargs ):
    """simple docstring"""
    return AutoConfig.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer(*args , **kwargs ):
    """simple docstring"""
    return AutoTokenizer.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModel.__doc__ )
def model(*args , **kwargs ):
    """simple docstring"""
    return AutoModel.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM(*args , **kwargs ):
    """simple docstring"""
    return AutoModelForCausalLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM(*args , **kwargs ):
    """simple docstring"""
    return AutoModelForMaskedLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification(*args , **kwargs ):
    """simple docstring"""
    return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering(*args , **kwargs ):
    """simple docstring"""
    return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs )
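
# Illustrative addition (not part of the original shim): with a hubconf-style
# file like this at a repository root, the entry points above become loadable
# through torch.hub, e.g. (repo name shown is only an example):
#
#   import torch
#   model = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")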
_snake_case : str = frozenset(
[
'prompt',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
_snake_case : int = frozenset(['prompt', 'negative_prompt'])
_snake_case : List[Any] = frozenset([])
_snake_case : Optional[Any] = frozenset(['image'])
_snake_case : List[str] = frozenset(
[
'image',
'height',
'width',
'guidance_scale',
]
)
_snake_case : List[str] = frozenset(['image'])
_snake_case : Optional[int] = frozenset(
[
'prompt',
'image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
_snake_case : Union[str, Any] = frozenset(['prompt', 'image', 'negative_prompt'])
_snake_case : List[str] = frozenset(
[
# Text guided image variation with an image mask
'prompt',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
_snake_case : int = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt'])
_snake_case : Optional[Any] = frozenset(
[
# image variation with an image mask
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
_snake_case : Optional[Any] = frozenset(['image', 'mask_image'])
_snake_case : Tuple = frozenset(
[
'example_image',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
_snake_case : List[Any] = frozenset(['example_image', 'image', 'mask_image'])
_snake_case : Optional[int] = frozenset(['class_labels'])
_snake_case : Tuple = frozenset(['class_labels'])
_snake_case : Optional[int] = frozenset(['batch_size'])
_snake_case : Dict = frozenset([])
_snake_case : Optional[int] = frozenset(['batch_size'])
_snake_case : str = frozenset([])
_snake_case : Union[str, Any] = frozenset(
[
'prompt',
'audio_length_in_s',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
_snake_case : Union[str, Any] = frozenset(['prompt', 'negative_prompt'])
_snake_case : Tuple = frozenset(['input_tokens'])
_snake_case : List[str] = frozenset(['input_tokens'])
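
# Illustrative addition (not part of the original module): sets like the ones
# above are typically consumed by pipeline tests to validate a pipeline's call
# signature. A hypothetical helper might compare them like this:
def _check_params_sketch(pipeline_call_args, expected_params):
    missing = set(expected_params) - set(pipeline_call_args)
    assert not missing, f"pipeline signature is missing params: {missing}"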
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self ):
        self.block_size = 10
    def test_fit_to_block_sequence_too_small(self ):
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
    def test_fit_to_block_sequence_fit_exactly(self ):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
    def test_fit_to_block_sequence_too_big(self ):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
    def test_process_story_no_highlights(self ):
        raw_story = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
        _, summary_lines = process_story(raw_story )
        self.assertEqual(summary_lines , [] )
    def test_process_empty_story(self ):
        raw_story = ''
        story_lines, summary_lines = process_story(raw_story )
        self.assertEqual(story_lines , [] )
        self.assertEqual(summary_lines , [] )
    def test_process_story_with_highlights(self ):
        raw_story = (
            'It was the year of Our Lord one thousand seven hundred and '
            'seventy-five\n\nSpiritual revelations were conceded to England '
            'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
        )
        story_lines, summary_lines = process_story(raw_story )
        expected_story_lines = [
            'It was the year of Our Lord one thousand seven hundred and seventy-five.',
            'Spiritual revelations were conceded to England at that favoured period, as at this.',
        ]
        self.assertEqual(expected_story_lines , story_lines )
        expected_summary_lines = ['It was the best of times.']
        self.assertEqual(expected_summary_lines , summary_lines )
    def test_build_mask_no_padding(self ):
        sequence = torch.tensor([1, 2, 3, 4] )
        expected = torch.tensor([1, 1, 1, 1] )
        np.testing.assert_array_equal(build_mask(sequence , 0 ).numpy() , expected.numpy() )
    def test_build_mask(self ):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(sequence , 23 ).numpy() , expected.numpy() )
    def test_build_mask_with_padding_equal_to_one(self ):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(sequence , 1 ).numpy() , expected.numpy() )
    def test_compute_token_type_ids(self ):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
        result = compute_token_type_ids(batch , separator )
        np.testing.assert_array_equal(result , expected )
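
# Illustrative addition (not part of the original tests): a reference
# formulation of the behaviour the build_mask tests above assert, assuming the
# mask zeroes out every position equal to the given padding token id.
def _build_mask_reference(sequence, pad_token_id):
    return (sequence != pad_token_id).long()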
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Dict =logging.get_logger(__name__)
_A : Any ={
'''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig ):
    model_type = """pegasus"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self , vocab_size=50_265 , max_position_embeddings=1_024 , encoder_layers=12 , encoder_ffn_dim=4_096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4_096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=1_024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=0 , scale_embedding=False , pad_token_id=0 , eos_token_id=1 , forced_eos_token_id=1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
    @property
    def num_attention_heads(self ):
        return self.encoder_attention_heads
    @property
    def hidden_size(self ):
        return self.d_model
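
# Illustrative addition (not part of the original module): attribute_map lets
# generic code read the mapped names even though Pegasus stores them under
# different attributes, e.g.:
#
#   config = PegasusConfig(d_model=512, encoder_attention_heads=8)
#   assert config.hidden_size == 512
#   assert config.num_attention_heads == 8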
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput ):
    latents: torch.FloatTensor
class VQModel(ModelMixin , ConfigMixin ):
@register_to_config
    def __init__( self , in_channels: int = 3 , out_channels: int = 3 , down_block_types: Tuple[str] = ("DownEncoderBlock2D",) , up_block_types: Tuple[str] = ("UpDecoderBlock2D",) , block_out_channels: Tuple[int] = (64,) , layers_per_block: int = 1 , act_fn: str = "silu" , latent_channels: int = 3 , sample_size: int = 32 , num_vq_embeddings: int = 256 , norm_num_groups: int = 32 , vq_embed_dim: Optional[int] = None , scaling_factor: float = 0.18_215 , norm_type: str = "group" , ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=False , )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels , vq_embed_dim , 1 )
        self.quantize = VectorQuantizer(num_vq_embeddings , vq_embed_dim , beta=0.25 , remap=None , sane_index_shape=False )
        self.post_quant_conv = nn.Conv2d(vq_embed_dim , latent_channels , 1 )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , norm_type=norm_type , )
    @apply_forward_hook
    def encode( self , x: torch.FloatTensor , return_dict: bool = True ):
        h = self.encoder(x )
        h = self.quant_conv(h )
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h )
    @apply_forward_hook
    def decode( self , h: torch.FloatTensor , force_not_quantize: bool = False , return_dict: bool = True ):
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h )
        else:
            quant = h
        quant2 = self.post_quant_conv(quant )
        dec = self.decoder(quant2 , quant if self.config.norm_type == "spatial" else None )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    def forward( self , sample: torch.FloatTensor , return_dict: bool = True ):
        x = sample
        h = self.encode(x ).latents
        dec = self.decode(h ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others, and a second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
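# Under the hood this expansion is a plain cartesian product. A minimal
# illustrative sketch (not the tool's exact code) of how the dimensions combine:
#
#   import itertools
#   dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#   variations = [" ".join(v).strip() for v in itertools.product(*dims)]
#   # -> ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#   #     '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']
#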
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """
    A helper class to tee print's output into a file.
    Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    cmd = []
    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)
    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # enable this branch to debug the function w/o running the actual benchmark
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(id, cmd, variation_key, variation, longest_variation_len, target_metric_key, report_metric_keys, repeat_times, output_dir, verbose):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd", default=None, type=str, required=True, help="Base cmd",
    )
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True, help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation", default=None, type=str, help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True, help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys", default="", type=str, help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times", default=1, type=int, help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str, help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose", default=False, action="store_true", help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1, cmd, variation_key, variation, longest_variation_len, args.target_metric_key, report_metric_keys, args.repeat_times, output_dir, args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)


if __name__ == "__main__":
    main()
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # normal_gradient = gradient of the normal at the point of reflection
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
print(F'''{solution() = }''')
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(f'''{solution() = }''')
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/longformer-base-4096''': 40_96,
'''allenai/longformer-large-4096''': 40_96,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 40_96,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 40_96,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 40_96,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns list of utf-8 byte and a mapping to unicode strings, avoiding the
    whitespace/control characters the bpe code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
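# For instance (illustrative): the space byte (0x20) is not printable, so it is
# remapped and bytes_to_unicode()[ord(" ")] == "Ġ", which is why BPE vocabularies
# show "Ġ"-prefixed tokens for words preceded by a space.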
def get_pairs(word):
    """Return set of symbol pairs in a word, where the word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
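# For example (illustrative): get_pairs(("l", "o", "w", "e", "r")) returns
# {("l", "o"), ("o", "w"), ("w", "e"), ("e", "r")}; the bpe loop below repeatedly
# merges the pair with the best rank in self.bpe_ranks.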
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
"""simple docstring"""
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    # extended Euclidean algorithm
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
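

if __name__ == "__main__":
    # Quick sanity check (illustrative): 7 * 15 = 105 = 4 * 26 + 1, so 15 is the
    # modular inverse of 7 modulo 26.
    assert find_mod_inverse(7, 26) == 15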
"""simple docstring"""
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}')
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1_000, beta_start: float = 0.0_001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[Union[np.ndarray, List[float]]] = None, clip_sample: bool = True, set_alpha_to_zero: bool = True, steps_offset: int = 0, prediction_type: str = "epsilon", clip_sample_range: float = 1.0, **kwargs):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(F'{beta_schedule} is not implemented for {self.__class__}')
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                F'`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'
                F' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'
                F' maximal {self.config.num_train_timesteps} timesteps.')
        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, eta: float = 0.0, use_clipped_model_output: bool = False, variance_noise: Optional[torch.FloatTensor] = None, return_dict: bool = True) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps
        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t
        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'
                ''' `v_prediction`''')
        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range)
        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
"""simple docstring"""
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(self, vocab_size=30_522, max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2_048, dec_dropout=0.2, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def b2mb(x):
    # Converting Bytes to Megabytes
    return int(x / 2**20)
# This context manager is used to track the peak memory usage of the process
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
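# Minimal usage sketch (illustrative) for the context manager above:
#
#   with TorchTracemalloc() as tracemalloc:
#       model(**batch)
#   print(f"used={tracemalloc.used}MB peaked={tracemalloc.peaked}MB")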
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased", n_train: int = 320, n_val: int = 160):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"})

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)))
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False,
    )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound", type=float, default=None, help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train", type=int, default=320, help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val", type=int, default=160, help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case : Any = BertTokenizer
__snake_case : Dict = BertTokenizerFast
__snake_case : Tuple = True
__snake_case : List[Any] = True
__snake_case : Optional[Any] = filter_non_english
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def SCREAMING_SNAKE_CASE__ ( self : str ,lowerCamelCase__ : List[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
SCREAMING_SNAKE_CASE = """unwanted, running"""
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(lowerCamelCase__ ,["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) ,[9, 6, 7, 12, 10, 11] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
# With lower casing
SCREAMING_SNAKE_CASE = self.get_tokenizer(do_lower_case=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer(do_lower_case=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
    def test_chinese(self ):
        '''simple docstring'''
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) ,["""ah""", """\u535A""", """\u63A8""", """zz"""] )

    def test_basic_tokenizer_lower(self ):
        '''simple docstring'''
        tokenizer = BasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how  \n Are yoU?  """ ) ,["""hello""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )

    def test_basic_tokenizer_lower_strip_accents_false(self ):
        '''simple docstring'''
        tokenizer = BasicTokenizer(do_lower_case=True ,strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how  \n Are yoU?  """ ) ,["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""h\u00E9llo"""] )

    def test_basic_tokenizer_lower_strip_accents_true(self ):
        '''simple docstring'''
        tokenizer = BasicTokenizer(do_lower_case=True ,strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how  \n Are yoU?  """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )

    def test_basic_tokenizer_lower_strip_accents_default(self ):
        '''simple docstring'''
        tokenizer = BasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how  \n Are yoU?  """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )

    def test_basic_tokenizer_no_lower(self ):
        '''simple docstring'''
        tokenizer = BasicTokenizer(do_lower_case=False )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how  \n Are yoU?  """ ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )

    def test_basic_tokenizer_no_lower_strip_accents_false(self ):
        '''simple docstring'''
        tokenizer = BasicTokenizer(do_lower_case=False ,strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how  \n Are yoU?  """ ) ,["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )

    def test_basic_tokenizer_no_lower_strip_accents_true(self ):
        '''simple docstring'''
        tokenizer = BasicTokenizer(do_lower_case=False ,strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how  \n Are yoU?  """ ) ,["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )

    def test_basic_tokenizer_respects_never_split_tokens(self ):
        '''simple docstring'''
        tokenizer = BasicTokenizer(do_lower_case=False ,never_split=["""[UNK]"""] )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how  \n Are yoU? [UNK]""" ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )

    def test_basic_tokenizer_splits_on_punctuation(self ):
        '''simple docstring'''
        tokenizer = BasicTokenizer()
        text = """a\n'll !!to?'d of, can't."""
        expected = ["""a""", """'""", """ll""", """!""", """!""", """to""", """?""", """'""", """d""", """of""", """,""", """can""", """'""", """t""", """."""]
        self.assertListEqual(tokenizer.tokenize(text ) ,expected )
    def test_wordpiece_tokenizer(self ):
        '''simple docstring'''
        vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab ,unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) ,[] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) ,["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) ,["""[UNK]""", """runn""", """##ing"""] )
    def test_is_whitespace(self ):
'''simple docstring'''
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
    def test_is_control(self ):
'''simple docstring'''
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
    def test_is_punctuation(self ):
'''simple docstring'''
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
    def test_clean_text(self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["""Test""", """\xad""", """test"""]] ,[["""[UNK]"""], [], ["""[UNK]"""]] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ["""Test""", """\xad""", """test"""]] ,[["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
    def test_sequence_builders(self ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained("""bert-base-uncased""" )
        text = tokenizer.encode("""sequence builders""" ,add_special_tokens=False )
        text_a = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text ,text_a )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
    def test_offsets_with_special_characters(self ):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name ,**kwargs )
                sentence = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
                tokens = tokenizer_r.encode_plus(
                    sentence ,return_attention_mask=False ,return_token_type_ids=False ,return_offsets_mapping=True ,add_special_tokens=True ,)
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r ,"""do_lower_case""" ) else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] ,tokens["""offset_mapping"""] )
    def test_change_tokenize_chinese_chars(self ):
        '''simple docstring'''
        list_of_commun_chinese_char = ["""的""", """人""", """有"""]
        text_with_chinese_char = """""".join(list_of_commun_chinese_char )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                kwargs["""tokenize_chinese_chars"""] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name ,**kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name ,**kwargs )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char ,add_special_tokens=False )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char ,add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p ,list_of_commun_chinese_char )
                self.assertListEqual(tokens_without_spe_char_r ,list_of_commun_chinese_char )
                kwargs["""tokenize_chinese_chars"""] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name ,**kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name ,**kwargs )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char ,add_special_tokens=False )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char ,add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    F"""##{token}""" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char )
                ]
                self.assertListEqual(tokens_without_spe_char_p ,expected_tokens )
                self.assertListEqual(tokens_without_spe_char_r ,expected_tokens )
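
# --- Added example ---------------------------------------------------------
# The tests above exercise WordPiece's greedy longest-match-first algorithm.
# Below is a minimal, self-contained sketch of that algorithm (an illustration,
# not the implementation under test; `toy_vocab` is an assumed toy vocabulary):
def greedy_wordpiece(word, vocab, unk="[UNK]"):
    """Split one whitespace-delimited word into WordPiece sub-tokens."""
    pieces, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece  # non-initial pieces carry the "##" marker
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk]  # any unmatchable span makes the whole word unknown
        pieces.append(cur)
        start = end
    return pieces


toy_vocab = {"un", "##want", "##ed", "runn", "##ing"}
assert greedy_wordpiece("unwanted", toy_vocab) == ["un", "##want", "##ed"]
assert greedy_wordpiece("unwantedX", toy_vocab) == ["[UNK]"]
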
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor

    def __call__(
        self ,
        images ,
        text = None ,
        add_special_tokens = True ,
        padding = False ,
        truncation = None ,
        max_length = None ,
        stride = 0 ,
        pad_to_multiple_of = None ,
        return_token_type_ids = None ,
        return_attention_mask = None ,
        return_overflowing_tokens = False ,
        return_special_tokens_mask = False ,
        return_offsets_mapping = False ,
        return_length = False ,
        verbose = True ,
        return_tensors = None ,
        **kwargs ,
    ):
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        encoding.update(encoding_image_processor )
        return encoding

    def batch_decode(self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode(self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names(self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class(self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor(self ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
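
# --- Added usage sketch ------------------------------------------------------
# A hedged usage sketch for the processor above: it runs the tokenizer on the
# text and the image processor on the image, then merges both encodings. The
# checkpoint name and image path below are illustrative assumptions, and the
# call needs network access, so it is shown as a commented example:
#
#     from PIL import Image
#     from transformers import ViltProcessor
#     processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#     inputs = processor(Image.open("example.jpg"), "How many cats are there?", return_tensors="pt")
#     # -> input_ids / attention_mask from the tokenizer,
#     #    plus pixel_values / pixel_mask from the image processor
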
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_bloom_fast'''] = ['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_bloom'''] = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
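
# --- Added example -----------------------------------------------------------
# The `sys.modules[__name__] = _LazyModule(...)` line above swaps the package
# module for a proxy that imports submodules only on first attribute access.
# A minimal analogous sketch using PEP 562 module-level __getattr__ (this is an
# illustration of the pattern, not transformers' _LazyModule; the package and
# attribute names are assumptions):
#
#     # mypackage/__init__.py
#     import importlib
#     _import_structure = {"heavy_module": ["HeavyClass"]}
#
#     def __getattr__(name):  # called only when `name` is not found normally
#         for module_name, attrs in _import_structure.items():
#             if name in attrs:
#                 module = importlib.import_module("." + module_name, __name__)
#                 return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
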
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self , row , column , default_value = 0 ):
        '''simple docstring'''
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column )] for r in range(row )]
def __str__( self):
'''simple docstring'''
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj )))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector ) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
            line += "]"
            return line

        s += "\n".join(single_line(row_vector ) for row_vector in self.array )
return s
def __repr__( self):
'''simple docstring'''
return str(self)
    def validate_indicies(self , loc ):
        '''simple docstring'''
        if not (isinstance(loc , (list, tuple) ) and len(loc ) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self , loc ):
        '''simple docstring'''
        assert self.validate_indicies(loc )
        return self.array[loc[0]][loc[1]]

    def __setitem__(self , loc , value ):
        '''simple docstring'''
        assert self.validate_indicies(loc )
        self.array[loc[0]][loc[1]] = value
    def __add__(self , another ):
        '''simple docstring'''
        assert isinstance(another , Matrix )
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self ):
        '''simple docstring'''
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self , another ):
        '''simple docstring'''
        return self + (-another)

    def __mul__(self , another ):
        '''simple docstring'''
        if isinstance(another , (int, float) ):  # Scalar multiplication
            result = Matrix(self.row , self.column )
            for r in range(self.row ):
                for c in range(self.column ):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another , Matrix ):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row , another.column )
            for r in range(self.row ):
                for c in range(another.column ):
                    for i in range(self.column ):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another )})"
            raise TypeError(msg )
    def transpose(self ):
        '''simple docstring'''
        result = Matrix(self.column , self.row )
        for r in range(self.row ):
            for c in range(self.column ):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self , u , v ):
        '''simple docstring'''
        assert isinstance(u , Matrix ) and isinstance(v , Matrix )
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def testa():
        '''simple docstring'''
        ainv = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}" )
        # u, v
        u = Matrix(3 , 1 , 0 )
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3 , 1 , 0 )
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}" )
        print(f"v is {v}" )
        print(f"uv^T is {u * v.transpose()}" )
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}" )

    def test_doctest():
        '''simple docstring'''
        import doctest

        doctest.testmod()

    testa()
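
# --- Added note --------------------------------------------------------------
# The `sherman_morrison` method above implements the rank-one inverse update.
# With `self` holding A^{-1}, in LaTeX:
#
#     (A + u v^T)^{-1} = A^{-1} - \frac{A^{-1} u \, v^T A^{-1}}{1 + v^T A^{-1} u}
#
# A quick independent check with NumPy (A = I and the u, v from the demo above;
# this sketch is an added illustration, not part of the original module):
import numpy as np

_A_inv = np.eye(3)                                   # A = I, so A^{-1} = I
_u = np.array([[1.0], [2.0], [-3.0]])
_v = np.array([[4.0], [-2.0], [5.0]])
_denom = 1.0 + (_v.T @ _A_inv @ _u).item()           # 1 + v^T u = -14, nonzero
_updated = _A_inv - (_A_inv @ _u @ _v.T @ _A_inv) / _denom
assert np.allclose(_updated, np.linalg.inv(np.eye(3) + _u @ _v.T))
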
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser ):
        '''simple docstring'''
        raise NotImplementedError()

    @abstractmethod
    def run(self ):
        '''simple docstring'''
        raise NotImplementedError()
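
# --- Added example -----------------------------------------------------------
# A minimal hypothetical subcommand built on the ABC above: `register_subcommand`
# wires a sub-parser plus a factory, and `run` does the work. All names here
# ("EchoCommand", "demo-cli") are assumptions for illustration only.
class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        echo_parser = parser.add_parser("echo", help="print a message")
        echo_parser.add_argument("text")
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.text))

    def __init__(self, text):
        self._text = text

    def run(self):
        print(self._text)


_cli = ArgumentParser("demo-cli")
EchoCommand.register_subcommand(_cli.add_subparsers())
_args = _cli.parse_args(["echo", "hello"])
_args.func(_args).run()  # prints "hello"
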
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset ):
    def __init__(self , params , data ):
        '''simple docstring'''
        self.params = params
        self.token_ids = np.array(data )
        self.lengths = np.array([len(t ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , __UpperCAmelCase ):
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
'''simple docstring'''
return len(self.lengths )
    def check(self ):
'''simple docstring'''
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
    def remove_long_sequences(self ):
        '''simple docstring'''
        max_len = self.params.max_model_input_size
        idxs = self.lengths > max_len
        logger.info(F'Splitting {sum(idxs )} too long sequences.' )

        def divide_chunks(l , n ):
            return [l[i : i + n] for i in range(0 , len(l ) , n )]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids['''cls_token'''], self.params.special_tok_ids['''sep_token''']
        else:
            cls_id, sep_id = self.params.special_tok_ids['''bos_token'''], self.params.special_tok_ids['''eos_token''']
        for seq_, len_ in zip(self.token_ids , self.lengths ):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_ )
                new_lengths.append(len_ )
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_ , max_len - 2 ):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s , 0 , cls_id )
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s , len(sub_s ) , sep_id )
                    assert len(sub_s ) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s )
                new_tok_ids.extend(sub_seqs )
                new_lengths.extend([len(l ) for l in sub_seqs] )
        self.token_ids = np.array(new_tok_ids )
        self.lengths = np.array(new_lengths )
    def remove_empty_sequences(self ):
        '''simple docstring'''
        init_size = len(self )
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self )
        logger.info(F'Remove {init_size - new_size} too short (<=11 tokens) sequences.' )
    def remove_unknown_sequences(self ):
        '''simple docstring'''
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids['''unk_token''']
        init_size = len(self )
        unk_occs = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self )
        logger.info(F'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).' )
    def print_statistics(self ):
'''simple docstring'''
if not self.params.is_master:
return
logger.info(F'{len(self )} sequences' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self , batch ):
        '''simple docstring'''
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids ) == len(lengths )
        # Max for paddings
        max_seq_len_ = max(lengths )
        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids['''pad_token''']
        else:
            pad_idx = self.params.special_tok_ids['''unk_token''']
        tk_ = [list(t.astype(int ) ) + [pad_idx] * (max_seq_len_ - len(t )) for t in token_ids]
        assert len(tk_ ) == len(token_ids )
        assert all(len(t ) == max_seq_len_ for t in tk_ )
        tk_t = torch.tensor(tk_ )  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths )  # (bs)
        return tk_t, lg_t
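
# --- Added example -----------------------------------------------------------
# The final method above is a pad-to-longest collate. A self-contained sketch
# of the same idea (the pad index and the toy batch are assumptions):
def _pad_collate(batch, pad_idx=0):
    token_ids, lengths = zip(*batch)          # batch of (ids, length) pairs
    max_len = max(lengths)
    padded = [list(t) + [pad_idx] * (max_len - len(t)) for t in token_ids]
    return torch.tensor(padded), torch.tensor(lengths)


_tk, _lg = _pad_collate([([5, 6, 7], 3), ([8, 9], 2)])
assert _tk.shape == (2, 3) and _lg.tolist() == [3, 2]
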
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''',
    }
}
class CpmTokenizer(PreTrainedTokenizer ):
    def __init__(
        self ,
        vocab_file ,
        do_lower_case=False ,
        remove_space=True ,
        keep_accents=False ,
        bos_token="<s>" ,
        eos_token="</s>" ,
        unk_token="<unk>" ,
        sep_token="<sep>" ,
        pad_token="<pad>" ,
        cls_token="<cls>" ,
        mask_token="<mask>" ,
        additional_special_tokens=["<eop>", "<eod>"] ,
        sp_model_kwargs: Optional[Dict[str, Any]] = None ,
        **kwargs ,
    ):
        mask_token = AddedToken(mask_token ,lstrip=True ,rstrip=False ) if isinstance(mask_token ,str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case ,remove_space=remove_space ,keep_accents=keep_accents ,bos_token=bos_token ,eos_token=eos_token ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,additional_special_tokens=additional_special_tokens ,sp_model_kwargs=self.sp_model_kwargs ,**kwargs ,)
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                '''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
                '''See https://pypi.org/project/jieba/ for installation.''' )
        self.jieba = jieba
        self.translator = str.maketrans(''' \n''' ,'''\u2582\u2583''' )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self ):
return len(self.sp_model )
    def get_vocab(self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__(self ):
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__(self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self ,'''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def preprocess_text(self , inputs ):
        if self.remove_space:
            outputs = ''' '''.join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace('''``''' ,'''"''' ).replace('''\'\'''' ,'''"''' )
        if not self.keep_accents:
            outputs = unicodedata.normalize('''NFKD''' ,outputs )
            outputs = ''''''.join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self , text ):
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text ,out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE ,'''''' ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id(self , token ):
        return self.sp_model.PieceToId(token )

    def _convert_id_to_token(self , index ):
        return self.sp_model.IdToPiece(index )

    def convert_tokens_to_string(self , tokens ):
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE ,''' ''' ).strip()
        return out_string
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask(self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 ,token_ids_1=token_ids_1 ,already_has_special_tokens=True )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1, 1]
        return ([0] * len(token_ids_0 )) + [1, 1]
    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def save_vocabulary(self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file ,'''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def _decode(self , *args , **kwargs ):
        text = super()._decode(*args ,**kwargs )
        text = text.replace(''' ''' ,'''''' ).replace('''\u2582''' ,''' ''' ).replace('''\u2583''' ,'''\n''' )
        return text
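
# --- Added example -----------------------------------------------------------
# The tokenizer above hides spaces and newlines inside printable placeholders
# (U+2582 / U+2583) before SentencePiece sees the jieba-cut text, and `_decode`
# maps them back. The round trip is plain `str.translate` (the sample text is
# an assumption for illustration):
_encode_table = str.maketrans(" \n", "\u2582\u2583")
_text = "你好 世界\n再见"
_hidden = _text.translate(_encode_table)
_restored = _hidden.replace("\u2582", " ").replace("\u2583", "\n")
assert _restored == _text
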
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = None
    def __init__(
        self ,
        vocab_file=None ,
        merges_file=None ,
        tokenizer_file=None ,
        unk_token="<unk>" ,
        bos_token="<s>" ,
        eos_token="</s>" ,
        pad_token="<pad>" ,
        add_prefix_space=False ,
        clean_up_tokenization_spaces=False ,
        **kwargs ,
    ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , add_prefix_space=add_prefix_space , clean_up_tokenization_spaces=clean_up_tokenization_spaces , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space" , add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self , *args , **kwargs ):
        is_split_into_words = kwargs.get("is_split_into_words" , False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
                " pretokenized inputs.")
        return super()._batch_encode_plus(*args , **kwargs)

    def _encode_plus(self , *args , **kwargs ):
        is_split_into_words = kwargs.get("is_split_into_words" , False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
                " pretokenized inputs.")
        return super()._encode_plus(*args , **kwargs)

    def save_vocabulary(self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self , conversation: "Conversation" ):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
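
# --- Added example -----------------------------------------------------------
# The conversation method above appends EOS after every turn and keeps only the
# most recent `model_max_length` tokens. A minimal sketch of that truncation
# (the token ids and lengths below are made up):
def _build_conversation_ids(turn_ids, eos_id, max_length):
    input_ids = []
    for ids in turn_ids:
        input_ids.extend(ids + [eos_id])      # one EOS terminates each turn
    return input_ids[-max_length:]            # keep only the newest tokens


assert _build_conversation_ids([[1, 2], [3, 4, 5]], eos_id=0, max_length=4) == [3, 4, 5, 0]
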
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase = logging.get_logger(__name__)
def create_rename_keys(config , base_model=False ):
    '''simple docstring'''
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
a : Tuple = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict , config , base_model=False ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict ):
    '''simple docstring'''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name , pytorch_dump_folder_path , push_to_hub=False ):
    '''simple docstring'''
    backbone_config = BitConfig(
        global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=True , )
    config = ViTHybridConfig(backbone_config=backbone_config , image_size=384 , num_labels=1_000 )
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config ).eval()
    else:
        model = ViTHybridForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    pixel_values_timm = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors="pt" ).pixel_values
    # verify pixel values
    assert torch.allclose(pixel_values_timm , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits
    print("Predicted class:" , logits.argmax(-1 ).item() )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1e-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F'''Saving processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F'''Pushing model and processor to the hub {vit_name}''' )
        model.push_to_hub(F'''ybelkada/{vit_name}''' )
        processor.push_to_hub(F'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
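
# --- Added example -----------------------------------------------------------
# `read_in_q_k_v` above splits timm's fused attention projection into separate
# query/key/value tensors by row slices. A toy-sized sketch of that slicing
# (hidden size 4 is an assumption for illustration):
_hidden = 4
_qkv_w = torch.randn(3 * _hidden, _hidden)    # q, k, v stacked row-wise
_q, _k, _v = _qkv_w[:_hidden], _qkv_w[_hidden : 2 * _hidden], _qkv_w[-_hidden:]
# Re-stacking must reproduce the fused tensor exactly:
assert torch.equal(torch.cat([_q, _k, _v], dim=0), _qkv_w)
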
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args , **kwargs ):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowerCamelCase__ ( unittest.TestCase):
SCREAMING_SNAKE_CASE__ = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self , model , tokenizer , processor ):
        object_detector = ObjectDetectionPipeline(model=model , image_processor=processor )
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self , object_detector , examples ):
        outputs = object_detector('''./tests/fixtures/tests_samples/COCO/000000039769.png''' , threshold=0.0 )
self.assertGreater(len(UpperCAmelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
UpperCAmelCase , {
'''score''': ANY(UpperCAmelCase ),
'''label''': ANY(UpperCAmelCase ),
'''box''': {'''xmin''': ANY(UpperCAmelCase ), '''ymin''': ANY(UpperCAmelCase ), '''xmax''': ANY(UpperCAmelCase ), '''ymax''': ANY(UpperCAmelCase )},
} , )
import datasets
_lowercase =datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
_lowercase =[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
]
_lowercase =object_detector(UpperCAmelCase , threshold=0.0 )
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for outputs in batch_outputs:
self.assertGreater(len(UpperCAmelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
UpperCAmelCase , {
'''score''': ANY(UpperCAmelCase ),
'''label''': ANY(UpperCAmelCase ),
'''box''': {'''xmin''': ANY(UpperCAmelCase ), '''ymin''': ANY(UpperCAmelCase ), '''xmax''': ANY(UpperCAmelCase ), '''ymax''': ANY(UpperCAmelCase )},
} , )
@require_tf
@unittest.skip('''Object detection not implemented in TF''' )
def __A (self ) -> Union[str, Any]:
pass
@require_torch
def __A (self ) -> Union[str, Any]:
_lowercase ='''hf-internal-testing/tiny-detr-mobilenetsv3'''
_lowercase =AutoModelForObjectDetection.from_pretrained(UpperCAmelCase )
_lowercase =AutoFeatureExtractor.from_pretrained(UpperCAmelCase )
_lowercase =ObjectDetectionPipeline(model=UpperCAmelCase , feature_extractor=UpperCAmelCase )
_lowercase =object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=0.0 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
] , )
_lowercase =object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
],
[
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
],
] , )
@require_torch
@slow
def __A (self ) -> Optional[int]:
_lowercase ='''facebook/detr-resnet-50'''
_lowercase =AutoModelForObjectDetection.from_pretrained(UpperCAmelCase )
_lowercase =AutoFeatureExtractor.from_pretrained(UpperCAmelCase )
_lowercase =ObjectDetectionPipeline(model=UpperCAmelCase , feature_extractor=UpperCAmelCase )
_lowercase =object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
] , )
_lowercase =object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
],
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
],
] , )
@require_torch
@slow
def __A (self ) -> Dict:
_lowercase ='''facebook/detr-resnet-50'''
_lowercase =pipeline('''object-detection''' , model=UpperCAmelCase )
_lowercase =object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
] , )
_lowercase =object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
],
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
],
] , )
@require_torch
@slow
def __A (self ) -> Any:
_lowercase =0.9985
_lowercase ='''facebook/detr-resnet-50'''
_lowercase =pipeline('''object-detection''' , model=UpperCAmelCase )
_lowercase =object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=UpperCAmelCase )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
] , )
@require_torch
@require_pytesseract
@slow
def __A (self ) -> Tuple:
_lowercase ='''Narsil/layoutlmv3-finetuned-funsd'''
_lowercase =0.9993
_lowercase =pipeline('''object-detection''' , model=UpperCAmelCase , threshold=UpperCAmelCase )
_lowercase =object_detector(
'''https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png''' )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 2_9_4, '''ymin''': 2_5_4, '''xmax''': 3_4_3, '''ymax''': 2_6_4}},
{'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 2_9_4, '''ymin''': 2_5_4, '''xmax''': 3_4_3, '''ymax''': 2_6_4}},
] , )
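
# --- Added example -----------------------------------------------------------
# The `threshold` argument used throughout these tests is a plain confidence
# cut-off on the detections. A minimal sketch (the sample boxes are made up):
def _filter_detections(detections, threshold):
    # Keep only boxes whose confidence clears the threshold, mirroring what the
    # pipeline's `threshold` kwarg does to the raw model outputs.
    return [d for d in detections if d["score"] >= threshold]


_sample = [
    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
    {"score": 0.3376, "label": "remote", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
]
assert [d["label"] for d in _filter_detections(_sample, 0.9985)] == ["cat"]
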
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xlm'''] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xlm'''] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5
| 1
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__( self , image_processor , tokenizer ) -> None:
        # Pix2Struct does not use token type ids
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )

    def __call__( self , images=None , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , max_patches: Optional[int] = 2048 , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_token_type_ids: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text." )

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images , return_tensors=return_tensors , max_patches=max_patches , **kwargs )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images , return_tensors=return_tensors , max_patches=max_patches , header_text=text , **kwargs )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask" )
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids" )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )

        return encoding_image_processor

    def batch_decode( self , *args , **kwargs ) -> Union[str, Any]:
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ) -> Optional[Any]:
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ) -> List[Any]:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
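# Usage sketch (illustrative; the checkpoint name and inputs are assumptions,
# not part of this module):
# processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
# inputs = processor(images=image, return_tensors="pt")                    # pixel_values only
# inputs = processor(images=image, text="a caption", return_tensors="pt")  # also decoder ids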
| 134
|
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
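# Added for illustration (not part of the original algorithm): the distance
# loop in classifier() can be vectorized with a single numpy call.
def euclidean_distances_vectorized(train_data, point):
    # One Euclidean norm per training row, with no explicit Python loop.
    return np.linalg.norm(np.array(train_data) - np.array(point), axis=1)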
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 134
| 1
|
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 324
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mctct'] = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 324
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
"tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : List[str] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 263
|
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
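# Illustrative sanity check (added): 145 = 1! + 4! + 5! = 1 + 24 + 120, so it
# is one of the curious numbers that solution() sums.
assert sum_of_digit_factorial(145) == 145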
if __name__ == "__main__":
print(f'''{solution() = }''')
| 263
| 1
|
def solution(n: int = 1_000) -> int:
    '''simple docstring'''
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
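# Illustrative sanity check (added): the first Fibonacci number with three
# digits is F(12) = 144, so the index returned for n = 3 is 12.
assert solution(3) == 12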
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 36
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_nllb_moe''': [
'''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''NllbMoeConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_nllb_moe'''] = [
'''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NllbMoeForConditionalGeneration''',
'''NllbMoeModel''',
'''NllbMoePreTrainedModel''',
'''NllbMoeTop2Router''',
'''NllbMoeSparseMLP''',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 223
| 0
|
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError('''the value of input must not be negative''')
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError('''the value of input must not be negative''')
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
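# Added for comparison only (not part of the benchmark below): the standard
# library gives the same count in one line via bin().
def get_set_bits_count_using_bin(number: int) -> int:
    if number < 0:
        raise ValueError('''the value of input must not be negative''')
    return bin(number).count("1")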
def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = '''import __main__ as z'''
        print(f"""Benchmark when {number = }:""")
        print(f"""{get_set_bits_count_using_modulo_operator(number) = }""")
        timing = timeit('''z.get_set_bits_count_using_modulo_operator(25)''' , setup=setup)
        print(f"""timeit() runs in {timing} seconds""")
        print(f"""{get_set_bits_count_using_brian_kernighans_algorithm(number) = }""")
        timing = timeit(
            '''z.get_set_bits_count_using_brian_kernighans_algorithm(25)''' , setup=setup , )
        print(f"""timeit() runs in {timing} seconds""")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 301
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json',
'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    '''simple docstring'''

    model_type = '''mobilenet_v1'''

    def __init__( self , num_channels=3 , image_size=224 , depth_multiplier=1.0 , min_depth=8 , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.999 , initializer_range=0.02 , layer_norm_eps=0.001 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError('''depth_multiplier must be greater than zero.''' )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse('''1.11''' )

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict([('''pixel_values''', {0: '''batch'''})] )

    @property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "image-classification":
            return OrderedDict([('''logits''', {0: '''batch'''})] )
        else:
            return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )

    @property
    def atol_for_validation( self ) -> float:
        """simple docstring"""
        return 1e-4
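# Usage sketch (illustrative values, not a released checkpoint configuration):
# config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
# assert config.min_depth == 8  # unspecified fields keep the defaults above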
| 301
| 1
|
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """simple docstring"""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(increment: int, total_number: int, level: int, current_list: list[int], total_list: list[list[int]], ) -> None:
    """simple docstring"""
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    """simple docstring"""
    for i in total_list:
        print(*i)
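# Cross-check sketch (added; assumes the functions above): the backtracking
# output matches itertools.combinations over range(1, n + 1), in the same order.
from itertools import combinations


def matches_itertools(n: int, k: int) -> bool:
    return generate_all_combinations(n, k) == [list(c) for c in combinations(range(1, n + 1), k)]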
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
print_all_state(total_list)
| 91
|
'''simple docstring'''
from ....utils import logging
logger = logging.get_logger(__name__)
class MMBTConfig:
    """simple docstring"""

    def __init__( self , config , num_labels=None , modal_hidden_size=2048 ):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
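# Usage sketch (illustrative; BertConfig is an assumption and is not imported
# in this module):
# text_config = BertConfig.from_pretrained("bert-base-uncased")
# config = MMBTConfig(text_config, num_labels=2, modal_hidden_size=2048)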
| 63
| 0
|
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = '''gelu'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) -> Optional[Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
def lowerCAmelCase ( self ) -> Dict:
_snake_case = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_snake_case = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_snake_case = tf.concat([input_ids, eos_tensor] , axis=1 )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_snake_case = prepare_blenderbot_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return config, inputs_dict
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
_snake_case = TFBlenderbotModel(config=lowerCAmelCase_ ).get_decoder()
_snake_case = inputs_dict['input_ids']
_snake_case = input_ids[:1, :]
_snake_case = inputs_dict['attention_mask'][:1, :]
_snake_case = inputs_dict['head_mask']
_snake_case = 1
# first forward pass
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ )
_snake_case , _snake_case = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
        _snake_case = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
_snake_case = tf.concat([input_ids, next_tokens] , axis=-1 )
_snake_case = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_snake_case = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_snake_case = output_from_no_past[:, -3:, random_slice_idx]
_snake_case = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCAmelCase_ , lowerCAmelCase_ , rtol=1E-3 )
def prepare_blenderbot_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> Optional[Any]:
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
lowerCAmelCase_ = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
lowerCAmelCase_ = (
{
'''conversational''': TFBlenderbotForConditionalGeneration,
'''feature-extraction''': TFBlenderbotModel,
'''summarization''': TFBlenderbotForConditionalGeneration,
'''text2text-generation''': TFBlenderbotForConditionalGeneration,
'''translation''': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowerCAmelCase_ = True
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = TFBlenderbotModelTester(self )
_snake_case = ConfigTester(self , config_class=lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def lowerCAmelCase ( self ) -> Any:
_snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase_ )
@require_tokenizers
@require_tf
class UpperCamelCase_ ( unittest.TestCase ):
    src_text = ['''My friends are cool but they eat too many carbs.''']
    model_name = '''facebook/blenderbot-400M-distill'''
@cached_property
def lowerCAmelCase ( self ) -> Dict:
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def lowerCAmelCase ( self ) -> int:
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
return model
@slow
def lowerCAmelCase ( self ) -> Dict:
        model_inputs = self.tokenizer(self.src_text , return_tensors='tf' )
        generated_ids = self.model.generate(
            model_inputs.input_ids , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 295
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ = 13 , lowerCAmelCase_ = 64 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = 128 , lowerCAmelCase_=[16, 32, 64, 128] , lowerCAmelCase_ = 7 , lowerCAmelCase_ = 4 , lowerCAmelCase_ = 37 , lowerCAmelCase_ = "gelu" , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 10 , lowerCAmelCase_ = 0.02 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 128 , lowerCAmelCase_ = [2, 2, 2, 2] , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , ) -> Dict:
_snake_case = parent
_snake_case = batch_size
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = is_training
_snake_case = use_labels
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = encoder_stride
_snake_case = num_attention_outputs
_snake_case = embed_dim
_snake_case = embed_dim + 1
_snake_case = resolution
_snake_case = depths
_snake_case = hidden_sizes
_snake_case = dim
_snake_case = mlp_expansion_ratio
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self ) -> Tuple:
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_snake_case = TFEfficientFormerModel(config=lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
_snake_case = self.type_sequence_label_size
_snake_case = TFEfficientFormerForImageClassification(lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_snake_case = 1
_snake_case = TFEfficientFormerForImageClassification(lowerCAmelCase_ )
_snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCAmelCase ( self ) -> str:
_snake_case = TFEfficientFormerModelTester(self )
_snake_case = ConfigTester(
self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 )
def lowerCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds' )
def lowerCAmelCase ( self ) -> int:
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings' )
def lowerCAmelCase ( self ) -> Optional[Any]:
pass
def lowerCAmelCase ( self ) -> str:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Optional[Any]:
def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
_snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
if hasattr(self.model_tester , 'encoder_seq_length' ):
_snake_case = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1:
_snake_case = seq_length * self.model_tester.chunk_length
else:
_snake_case = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
_snake_case = outputs.decoder_hidden_states
            self.assertIsInstance(lowerCAmelCase_ , (list, tuple) )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'seq_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'decoder_seq_length' , lowerCAmelCase_ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> List[Any]:
_snake_case = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCAmelCase ( self ) -> Dict:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' )
def lowerCAmelCase ( self ) -> Dict:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[Any]:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self ) -> str:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = TFEfficientFormerModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = True
_snake_case = getattr(self.model_tester , 'seq_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'encoder_seq_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'key_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'chunk_length' , lowerCAmelCase_ )
if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ):
_snake_case = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
_snake_case = True
_snake_case = False
_snake_case = True
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
_snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
_snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def lowerCAmelCase ( self ) -> Dict:
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
_snake_case = model_class(lowerCAmelCase_ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
_snake_case = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowerCAmelCase_ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
_snake_case = model(lowerCAmelCase_ )
self.assertTrue(outputs_dict is not None )
def lowerCamelCase__ ( ) -> List[str]:
'''simple docstring'''
_snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase ( self ) -> Dict:
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=lowerCAmelCase_ , return_tensors='tf' )
# forward pass
_snake_case = model(**lowerCAmelCase_ , training=lowerCAmelCase_ )
# verify the logits
_snake_case = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
_snake_case = tf.constant([-0.05_55, 0.48_25, -0.08_52] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self ) -> str:
_snake_case = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'snap-research/efficientformer-l1-300' )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=lowerCAmelCase_ , return_tensors='tf' )
# forward pass
_snake_case = model(**lowerCAmelCase_ , training=lowerCAmelCase_ )
# verify the logits
_snake_case = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
_snake_case = tf.constant([-0.13_12, 0.43_53, -1.04_99] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) )
| 295
| 1
|
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label( fname ) -> Union[str, Any]:
    '''simple docstring'''
    stem = fname.split(os.path.sep )[-1]
    return re.search(R'^(.*)_\d+\.jpg$' , stem ).groups()[0]
class PetsDataset(Dataset ):
    def __init__( self , file_names , image_transform=None , label_to_id=None ) -> Optional[Any]:
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id
def __len__( self : List[Any] ) -> Any:
return len(self.file_names )
    def __getitem__( self , idx ) -> Optional[Any]:
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname )
        image = raw_image.convert('RGB' )
        if self.image_transform is not None:
            image = self.image_transform(image )
        label = extract_label(fname )
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function( config , args ):
'''simple docstring'''
if args.with_tracking:
lowercase : Optional[int] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
lowercase : Union[str, Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase : Union[str, Any] = config['lr']
lowercase : Any = int(config['num_epochs'] )
lowercase : Union[str, Any] = int(config['seed'] )
lowercase : List[Any] = int(config['batch_size'] )
lowercase : str = config['image_size']
if not isinstance(_UpperCAmelCase , (list, tuple) ):
lowercase : Dict = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , 'isdigit' ):
if args.checkpointing_steps == "epoch":
lowercase : Dict = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
lowercase : Dict = int(args.checkpointing_steps )
else:
raise ValueError(
f'''Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.''' )
else:
lowercase : Tuple = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
lowercase : Optional[int] = os.path.split(_UpperCAmelCase )[-1].split('.' )[0]
accelerator.init_trackers(_UpperCAmelCase , _UpperCAmelCase )
# Grab all the image filenames
lowercase : Optional[Any] = [os.path.join(args.data_dir , _UpperCAmelCase ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )]
# Build the label correspondences
lowercase : str = [extract_label(_UpperCAmelCase ) for fname in file_names]
lowercase : List[Any] = list(set(_UpperCAmelCase ) )
id_to_label.sort()
lowercase : Optional[Any] = {lbl: i for i, lbl in enumerate(_UpperCAmelCase )}
# Set the seed before splitting the data.
np.random.seed(_UpperCAmelCase )
torch.manual_seed(_UpperCAmelCase )
torch.cuda.manual_seed_all(_UpperCAmelCase )
# Split our filenames between train and validation
lowercase : List[Any] = np.random.permutation(len(_UpperCAmelCase ) )
lowercase : Optional[Any] = int(0.8 * len(_UpperCAmelCase ) )
lowercase : int = random_perm[:cut]
lowercase : Any = random_perm[cut:]
# For training we use a simple RandomResizedCrop
lowercase : Dict = Compose([RandomResizedCrop(_UpperCAmelCase , scale=(0.5, 1.0) ), ToTensor()] )
lowercase : List[Any] = PetsDataset(
[file_names[i] for i in train_split] , image_transform=_UpperCAmelCase , label_to_id=_UpperCAmelCase )
# For evaluation, we use a deterministic Resize
lowercase : List[Any] = Compose([Resize(_UpperCAmelCase ), ToTensor()] )
lowercase : List[str] = PetsDataset([file_names[i] for i in eval_split] , image_transform=_UpperCAmelCase , label_to_id=_UpperCAmelCase )
# Instantiate dataloaders.
lowercase : Dict = DataLoader(_UpperCAmelCase , shuffle=_UpperCAmelCase , batch_size=_UpperCAmelCase , num_workers=4 )
lowercase : Any = DataLoader(_UpperCAmelCase , shuffle=_UpperCAmelCase , batch_size=_UpperCAmelCase , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase : List[Any] = create_model('resnet50d' , pretrained=_UpperCAmelCase , num_classes=len(_UpperCAmelCase ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase : Union[str, Any] = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
lowercase : Dict = False
for param in model.get_classifier().parameters():
lowercase : Dict = True
# We normalize the batches of images to be a bit faster.
lowercase : int = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
lowercase : Dict = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
lowercase : Any = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
lowercase : List[Any] = OneCycleLR(optimizer=_UpperCAmelCase , max_lr=_UpperCAmelCase , epochs=_UpperCAmelCase , steps_per_epoch=len(_UpperCAmelCase ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase , lowercase , lowercase , lowercase , lowercase : Optional[int] = accelerator.prepare(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# We need to keep track of how many total steps we have iterated over
lowercase : Tuple = 0
# We also need to keep track of the starting epoch so files are named properly
lowercase : List[str] = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(f'''Resumed from checkpoint: {args.resume_from_checkpoint}''' )
accelerator.load_state(args.resume_from_checkpoint )
lowercase : Any = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
lowercase : List[str] = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
lowercase : Dict = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
lowercase : Any = os.path.splitext(_UpperCAmelCase )[0]
if "epoch" in training_difference:
lowercase : List[Any] = int(training_difference.replace('epoch_' , '' ) ) + 1
lowercase : List[Any] = None
else:
lowercase : Optional[Any] = int(training_difference.replace('step_' , '' ) )
lowercase : int = resume_step // len(_UpperCAmelCase )
resume_step -= starting_epoch * len(_UpperCAmelCase )
# Now we train the model
for epoch in range(_UpperCAmelCase , _UpperCAmelCase ):
model.train()
if args.with_tracking:
lowercase : str = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
lowercase : Any = accelerator.skip_first_batches(_UpperCAmelCase , _UpperCAmelCase )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
lowercase : Union[str, Any] = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
lowercase : Any = {k: v.to(accelerator.device ) for k, v in batch.items()}
lowercase : List[str] = (batch['image'] - mean) / std
lowercase : Union[str, Any] = model(_UpperCAmelCase )
lowercase : Optional[int] = torch.nn.functional.cross_entropy(_UpperCAmelCase , batch['label'] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(_UpperCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase : Union[str, Any] = f'''step_{overall_step}'''
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
lowercase : Optional[Any] = os.path.join(args.output_dir , _UpperCAmelCase )
accelerator.save_state(_UpperCAmelCase )
model.eval()
lowercase : int = 0
lowercase : List[Any] = 0
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
lowercase : List[str] = {k: v.to(accelerator.device ) for k, v in batch.items()}
lowercase : Optional[Any] = (batch['image'] - mean) / std
with torch.no_grad():
lowercase : int = model(_UpperCAmelCase )
lowercase : Tuple = outputs.argmax(dim=-1 )
lowercase , lowercase : Optional[Any] = accelerator.gather_for_metrics((predictions, batch['label']) )
lowercase : Union[str, Any] = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
lowercase : List[str] = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}: {1_00 * eval_metric:.2f}''' )
if args.with_tracking:
accelerator.log(
{
'accuracy': 1_00 * eval_metric,
'train_loss': total_loss.item() / len(_UpperCAmelCase ),
'epoch': epoch,
} , step=_UpperCAmelCase , )
if checkpointing_steps == "epoch":
lowercase : str = f'''epoch_{epoch}'''
if args.output_dir is not None:
lowercase : Any = os.path.join(args.output_dir , _UpperCAmelCase )
accelerator.save_state(_UpperCAmelCase )
if args.with_tracking:
accelerator.end_training()
def main():
'''simple docstring'''
    parser = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument('--data_dir' , required=_UpperCAmelCase , help='The data folder on disk.' )
parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' )
parser.add_argument(
'--mixed_precision' , type=_UpperCAmelCase , default=_UpperCAmelCase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--checkpointing_steps' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , )
parser.add_argument(
'--output_dir' , type=_UpperCAmelCase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
'--project_dir' , type=_UpperCAmelCase , default='logs' , help='Location on where to store experiment tracking logs` and relevent project information' , )
    args = parser.parse_args()
    config = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224}
    training_function(config , args )
if __name__ == "__main__":
main()
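# Example invocation (illustrative paths; the flags come from the parser above):
#   python cv_example.py --data_dir /path/to/pet_images --checkpointing_steps epoch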
| 255
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb'] = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb_fast'] = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 255
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : str ) -> str:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Union[str, Any] =1
A__ : Optional[Any] =3
A__ : int =(32, 32)
A__ : Union[str, Any] =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase_ )
return image
@property
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
        A__ : Union[str, Any] =UNet2DConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=lowerCAmelCase_ , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
A__ : int =AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def lowercase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
A__ : Any =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
return CLIPTextModel(lowerCAmelCase_ )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
A__ : Optional[int] ="""cpu""" # ensure determinism for the device-dependent torch.Generator
A__ : Dict =self.dummy_cond_unet_upscale
A__ : List[str] =DDPMScheduler()
A__ : Union[str, Any] =DDIMScheduler(prediction_type="""v_prediction""" )
A__ : Union[str, Any] =self.dummy_vae
A__ : Any =self.dummy_text_encoder
A__ : Tuple =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A__ : List[Any] =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        A__ : Tuple =Image.fromarray(np.uint8(lowerCAmelCase_ ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
A__ : int =StableDiffusionUpscalePipeline(
unet=lowerCAmelCase_ , low_res_scheduler=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , max_noise_level=3_50 , )
A__ : int =sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : int ="""A painting of a squirrel eating a burger"""
A__ : List[str] =torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
A__ : Union[str, Any] =sd_pipe(
[prompt] , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
A__ : List[Any] =output.images
A__ : Dict =torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
A__ : Union[str, Any] =sd_pipe(
[prompt] , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=lowerCAmelCase_ , )[0]
A__ : int =image[0, -3:, -3:, -1]
A__ : Dict =image_from_tuple[0, -3:, -3:, -1]
A__ : Optional[int] =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
A__ : int =np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self : int ) -> List[str]:
'''simple docstring'''
A__ : Optional[Any] ="""cpu""" # ensure determinism for the device-dependent torch.Generator
A__ : Optional[int] =self.dummy_cond_unet_upscale
A__ : Optional[int] =DDPMScheduler()
A__ : List[Any] =DDIMScheduler(prediction_type="""v_prediction""" )
A__ : List[Any] =self.dummy_vae
A__ : Dict =self.dummy_text_encoder
A__ : Union[str, Any] =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A__ : List[str] =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        A__ : List[str] =Image.fromarray(np.uint8(lowerCAmelCase_ ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
A__ : Union[str, Any] =StableDiffusionUpscalePipeline(
unet=lowerCAmelCase_ , low_res_scheduler=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , max_noise_level=3_50 , )
A__ : Tuple =sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : int ="""A painting of a squirrel eating a burger"""
A__ : Optional[Any] =sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
A__ : List[Any] =output.images
assert image.shape[0] == 2
A__ : Optional[int] =torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
A__ : Optional[int] =sd_pipe(
[prompt] , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
A__ : Union[str, Any] =output.images
assert image.shape[0] == 2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae,
            text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator,
            num_inference_steps=2, output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=image, generator=generator,
            num_inference_steps=5, output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
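# --- Illustrative usage sketch (not part of the original test file) ---
# Minimal end-to-end use of the pipeline exercised above, assuming the
# `diffusers` package and a CUDA GPU. The checkpoint id and input image URL
# come from the integration tests; the output file name is a placeholder.
if __name__ == "__main__":
    import torch
    from diffusers import StableDiffusionUpscalePipeline
    from diffusers.utils import load_image

    pipe = StableDiffusionUpscalePipeline.from_pretrained(
        "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
    ).to("cuda")
    low_res = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/sd2-upscale/low_res_cat.png"
    )
    # the x4 upscaler returns an image 4x the input resolution, as asserted above
    upscaled = pipe(prompt="a cat sitting on a park bench", image=low_res).images[0]
    upscaled.save("upscaled_cat.png")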
| 353
|
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    """
    Feature extraction pipeline: returns the hidden states of the base transformer for each input.
    """

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
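# --- Illustrative usage sketch (not part of the original module) ---
# This class is what backs `pipeline("feature-extraction")`. A minimal call,
# where the model id is an assumption (any encoder checkpoint works):
if __name__ == "__main__":
    from transformers import pipeline

    extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
    features = extractor("Transformers are great", return_tensors=False)
    # nested lists shaped [batch, sequence_length, hidden_size]
    print(len(features[0]), len(features[0][0]))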
| 136
| 0
|
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # BigBird's block-sparse attention probabilities intentionally differ from the
        # PyTorch implementation, so attention outputs are skipped in the comparison.
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
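# --- Illustrative sketch of the jit pattern tested above (not in the original file) ---
# `test_jit_compilation` verifies a traced Flax forward pass matches the eager one.
# The same pattern in isolation, with a hypothetical stand-in for the model:
if __name__ == "__main__":
    import jax
    import jax.numpy as jnp

    @jax.jit
    def forward(input_ids, attention_mask=None):
        # stand-in for model(input_ids=..., attention_mask=...): embed then mask
        hidden = jnp.take(jnp.ones((99, 32)), input_ids, axis=0)
        if attention_mask is not None:
            hidden = hidden * attention_mask[..., None]
        return hidden

    ids = jnp.ones((2, 56), dtype=jnp.int32)
    mask = jnp.ones((2, 56), dtype=jnp.int32)
    jitted = forward(ids, mask)
    with jax.disable_jit():
        eager = forward(ids, mask)
    assert jitted.shape == eager.shape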
| 112
|
import math


def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001) -> float:
    """
    Square root approximated with Newton's method on f(x) = x^2 - a.
    """
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
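# --- Worked example (not in the original module) ---
# Newton's method solves f(x) = x**2 - a = 0 via x_next = x - f(x) / f'(x).
# For a = 2 the initial point is get_initial_point(2) == 4.0, and the iterates
# go 4.0 -> 2.25 -> 1.5694... -> 1.4219... -> 1.41424... -> sqrt(2).
assert abs(square_root_iterative(2) - 2**0.5) < 1e-9
assert abs(square_root_iterative(625) - 25.0) < 1e-9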
| 196
| 0
|
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
{
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
a : Union[str, Any] = 1
a : Tuple = self.get_dummy_canonical_hf_index_retriever()
a : str = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
a : Dict = retriever.retrieve(A , n_docs=A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , A )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
a : Dict = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
a : Optional[int] = self.get_dummy_dataset()
retriever.save_pretrained(A )
a : Dict = RagRetriever.from_pretrained(A )
self.assertIsInstance(A , A )
a : Tuple = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
a : Optional[int] = retriever.retrieve(A , n_docs=1 )
self.assertTrue(out is not None )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
a : int = 1
a : List[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=A )
a : str = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
a : List[str] = retriever.retrieve(A , n_docs=A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , A )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
a : str = self.get_dummy_custom_hf_index_retriever(from_disk=A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(A )
a : Dict = RagRetriever.from_pretrained(A )
self.assertIsInstance(A , A )
a : int = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
a : Union[str, Any] = retriever.retrieve(A , n_docs=1 )
self.assertTrue(out is not None )
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
a : Tuple = 1
a : Optional[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=A )
a : Optional[int] = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
a : Dict = retriever.retrieve(A , n_docs=A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , A )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
a : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(A )
a : Dict = RagRetriever.from_pretrained(A )
self.assertIsInstance(A , A )
a : Tuple = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
a : List[str] = retriever.retrieve(A , n_docs=1 )
self.assertTrue(out is not None )
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
a : Any = 1
a : Union[str, Any] = self.get_dummy_legacy_index_retriever()
a : Optional[Any] = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
a : Optional[Any] = retriever.retrieve(A , n_docs=A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['text', 'title'] )
self.assertEqual(len(doc_dicts[0]['text'] ) , A )
self.assertEqual(doc_dicts[0]['text'][0] , 'bar' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['text'][0] , 'foo' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
a : Any = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(A )
a : int = RagRetriever.from_pretrained(A )
self.assertIsInstance(A , A )
a : List[str] = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
a : Dict = retriever.retrieve(A , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
import torch
a : Tuple = 1
a : str = self.get_dummy_canonical_hf_index_retriever()
a : List[str] = [[5, 7], [1_0, 1_1]]
a : Tuple = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
a : Dict = retriever(A , A , prefix=retriever.config.generator.prefix , n_docs=A )
a : Any = (
out['context_input_ids'],
out['context_attention_mask'],
out['retrieved_doc_embeds'],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(A , A )
self.assertIsInstance(A , A )
self.assertIsInstance(A , np.ndarray )
a : Union[str, Any] = retriever(
A , A , prefix=retriever.config.generator.prefix , n_docs=A , return_tensors='pt' , )
a : Dict = ( # noqa: F841
out['context_input_ids'],
out['context_attention_mask'],
out['retrieved_doc_embeds'],
out['doc_ids'],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(A , torch.Tensor )
self.assertIsInstance(A , torch.Tensor )
self.assertIsInstance(A , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
a : Optional[int] = self.get_dpr_ctx_encoder_tokenizer()
a : int = 1
a : Any = self.get_dummy_custom_hf_index_retriever(from_disk=A )
retriever.set_ctx_encoder_tokenizer(A )
a : Any = [[5, 7], [1_0, 1_1]]
a : List[str] = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
a : Optional[Any] = retriever(A , A , prefix=retriever.config.generator.prefix , n_docs=A )
self.assertEqual(
len(A ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('tokenized_doc_ids', 'tokenized_doc_attention_mask') ) , A ) # check for doc token related keys in dictionary.
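# --- Illustrative sketch (not part of the original test file) ---
# The pattern these tests exercise, in miniature: build a faiss-indexed dataset
# and hand it to RagRetriever as a custom index. The model id is the standard
# RAG checkpoint and 768 is the DPR embedding dimension; both are assumptions
# taken from the public RAG docs, not from this file.
if __name__ == "__main__":
    import numpy as np
    from datasets import Dataset
    from transformers import RagRetriever

    dataset = Dataset.from_dict(
        {
            "id": ["0", "1"],
            "text": ["foo", "bar"],
            "title": ["Foo", "Bar"],
            "embeddings": [np.ones(768).tolist(), (2 * np.ones(768)).tolist()],
        }
    )
    dataset.add_faiss_index("embeddings")
    retriever = RagRetriever.from_pretrained(
        "facebook/rag-token-nq", index_name="custom", indexed_dataset=dataset
    )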
| 357
|
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )

        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 186
| 0
|
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """
    Evaluate a postfix (reverse Polish notation) expression.

    >>> evaluate_postfix(["2", "1", "+", "3", "*"])
    9
    """
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division that truncates toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
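# --- Worked example (not in the original module) ---
# "5 6 9 * +" is 5 + (6 * 9): the stack evolves
# [5] -> [5, 6] -> [5, 6, 9] -> [5, 54] -> [59]
assert evaluate_postfix(["5", "6", "9", "*", "+"]) == 59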
| 20
|
from collections import defaultdict


class AssignmentUsingBitmask:
    def __init__(self, task_performed: list[list[int]], total: int) -> None:
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [[-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask: int, task_no: int) -> int:
        # if mask == self.final_mask, all persons have been assigned a task, return 1
        if mask == self.final_mask:
            return 1

        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0

        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't include this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue

                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util

        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed: list[list[int]]) -> int:
        # store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
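    # --- Worked note (not in the original module) ---
    # For the inputs above the count is 10: fix person 2 on task 3 (5 valid ways
    # to give persons 0 and 1 distinct tasks) or on task 4 (5 more ways).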
| 97
| 0
|
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    iam_client = boto3.client("iam")
    sagemaker_trust_policy = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{'''Effect''': '''Allow''', '''Principal''': {'''Service''': '''sagemaker.amazonaws.com'''}, '''Action''': '''sts:AssumeRole'''}
],
}
try:
# create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{
'''Effect''': '''Allow''',
'''Action''': [
'''sagemaker:*''',
'''ecr:GetDownloadUrlForLayer''',
'''ecr:BatchGetImage''',
'''ecr:BatchCheckLayerAvailability''',
'''ecr:GetAuthorizationToken''',
'''cloudwatch:PutMetricData''',
'''cloudwatch:GetMetricData''',
'''cloudwatch:GetMetricStatistics''',
'''cloudwatch:ListMetrics''',
'''logs:CreateLogGroup''',
'''logs:CreateLogStream''',
'''logs:DescribeLogStreams''',
'''logs:PutLogEvents''',
'''logs:GetLogEvents''',
'''s3:CreateBucket''',
'''s3:ListBucket''',
'''s3:GetBucketLocation''',
'''s3:GetObject''',
'''s3:PutObject''',
],
'''Resource''': '''*''',
}
],
}
# attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'''role {role_name} already exists. Using existing one''' )
def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def _snake_case ( ) -> Any:
'''simple docstring'''
_A = _ask_options(
'How do you want to authorize?' , ['AWS Profile', 'Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '] , A_ , )
_A = None
if credentials_configuration == 0:
_A = _ask_field('Enter your AWS Profile name: [default] ' , default='default' )
_A = aws_profile
else:
print(
'Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,'
'`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`' )
_A = _ask_field('AWS Access Key ID: ' )
_A = aws_access_key_id
_A = _ask_field('AWS Secret Access Key: ' )
_A = aws_secret_access_key
_A = _ask_field('Enter your AWS Region: [us-east-1]' , default='us-east-1' )
_A = aws_region
_A = _ask_options(
'Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?' , ['Provide IAM Role name', 'Create new IAM role using credentials'] , A_ , )
if role_management == 0:
_A = _ask_field('Enter your IAM role name: ' )
else:
_A = '''accelerate_sagemaker_execution_role'''
print(F'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
_create_iam_role_for_sagemaker(A_ )
_A = _ask_field(
'Do you want to use custom Docker image? [yes/NO]: ' , _convert_yes_no_to_bool , default=A_ , error_message='Please enter yes or no.' , )
_A = None
if is_custom_docker_image:
_A = _ask_field('Enter your Docker image: ' , lambda _snake_case : str(A_ ).lower() )
_A = _ask_field(
'Do you want to provide SageMaker input channels with data locations? [yes/NO]: ' , _convert_yes_no_to_bool , default=A_ , error_message='Please enter yes or no.' , )
_A = None
if is_sagemaker_inputs_enabled:
_A = _ask_field(
'Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ' , lambda _snake_case : str(A_ ).lower() , )
_A = _ask_field(
'Do you want to enable SageMaker metrics? [yes/NO]: ' , _convert_yes_no_to_bool , default=A_ , error_message='Please enter yes or no.' , )
_A = None
if is_sagemaker_metrics_enabled:
_A = _ask_field(
'Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ' , lambda _snake_case : str(A_ ).lower() , )
    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
if use_dynamo:
_A = '''dynamo_'''
_A = _ask_options(
'Which dynamo backend would you like to use?' , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
_A = _ask_field(
'Do you want to customize the defaults sent to torch.compile? [yes/NO]: ' , _convert_yes_no_to_bool , default=A_ , error_message='Please enter yes or no.' , )
if use_custom_options:
_A = _ask_options(
'Which mode do you want to use?' , A_ , lambda _snake_case : TORCH_DYNAMO_MODES[int(A_ )] , default='default' , )
_A = _ask_field(
'Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ' , _convert_yes_no_to_bool , default=A_ , error_message='Please enter yes or no.' , )
_A = _ask_field(
'Do you want to enable dynamic shape tracing? [yes/NO]: ' , _convert_yes_no_to_bool , default=A_ , error_message='Please enter yes or no.' , )
    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")
    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )
    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )
if use_dynamo and mixed_precision == "no":
print(
'Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.' )
return SageMakerConfig(
image_uri=A_ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=A_ , use_cpu=A_ , dynamo_config=A_ , eca_instance_type=A_ , profile=A_ , region=A_ , iam_role_name=A_ , mixed_precision=A_ , num_machines=A_ , sagemaker_inputs_file=A_ , sagemaker_metrics_file=A_ , )
| 358
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """Output of AutoencoderKL's encoding method, holding the latent distribution."""

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types,
            block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn,
            norm_num_groups=norm_num_groups, double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types,
            block_out_channels=block_out_channels, layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups, act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowerCAmelCase_ ( self : str ):
_A = {}
def fn_recursive_add_processors(_UpperCAmelCase : str , _UpperCAmelCase : torch.nn.Module , _UpperCAmelCase : Dict[str, AttentionProcessor] ):
if hasattr(_UpperCAmelCase , 'set_processor' ):
_A = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'''{name}.{sub_name}''' , _UpperCAmelCase , _UpperCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return processors
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
_A = len(self.attn_processors.keys() )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != count:
raise ValueError(
F'''A dict of processors was passed, but the number of processors {len(_UpperCAmelCase )} does not match the'''
F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(_UpperCAmelCase : str , _UpperCAmelCase : torch.nn.Module , _UpperCAmelCase : int ):
if hasattr(_UpperCAmelCase , 'set_processor' ):
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
module.set_processor(_UpperCAmelCase )
else:
module.set_processor(processor.pop(F'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'''{name}.{sub_name}''' , _UpperCAmelCase , _UpperCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : bool = True ):
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_UpperCAmelCase , return_dict=_UpperCAmelCase )
if self.use_slicing and x.shape[0] > 1:
_A = [self.encoder(_UpperCAmelCase ) for x_slice in x.split(1 )]
_A = torch.cat(_UpperCAmelCase )
else:
_A = self.encoder(_UpperCAmelCase )
_A = self.quant_conv(_UpperCAmelCase )
_A = DiagonalGaussianDistribution(_UpperCAmelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : bool = True ):
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_UpperCAmelCase , return_dict=_UpperCAmelCase )
_A = self.post_quant_conv(_UpperCAmelCase )
_A = self.decoder(_UpperCAmelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCAmelCase )
@apply_forward_hook
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : bool = True ):
if self.use_slicing and z.shape[0] > 1:
_A = [self._decode(_UpperCAmelCase ).sample for z_slice in z.split(1 )]
_A = torch.cat(_UpperCAmelCase )
else:
_A = self._decode(_UpperCAmelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_UpperCAmelCase )
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Any ):
_A = min(a.shape[2] , b.shape[2] , _UpperCAmelCase )
for y in range(_UpperCAmelCase ):
_A = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] ):
_A = min(a.shape[3] , b.shape[3] , _UpperCAmelCase )
for x in range(_UpperCAmelCase ):
_A = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : bool = True ):
_A = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
_A = int(self.tile_latent_min_size * self.tile_overlap_factor )
_A = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
_A = []
for i in range(0 , x.shape[2] , _UpperCAmelCase ):
_A = []
for j in range(0 , x.shape[3] , _UpperCAmelCase ):
_A = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
_A = self.encoder(_UpperCAmelCase )
_A = self.quant_conv(_UpperCAmelCase )
row.append(_UpperCAmelCase )
rows.append(_UpperCAmelCase )
_A = []
for i, row in enumerate(_UpperCAmelCase ):
_A = []
for j, tile in enumerate(_UpperCAmelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_A = self.blend_v(rows[i - 1][j] , _UpperCAmelCase , _UpperCAmelCase )
if j > 0:
_A = self.blend_h(row[j - 1] , _UpperCAmelCase , _UpperCAmelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCAmelCase , dim=3 ) )
_A = torch.cat(_UpperCAmelCase , dim=2 )
_A = DiagonalGaussianDistribution(_UpperCAmelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : bool = True ):
_A = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
_A = int(self.tile_sample_min_size * self.tile_overlap_factor )
_A = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
_A = []
for i in range(0 , z.shape[2] , _UpperCAmelCase ):
_A = []
for j in range(0 , z.shape[3] , _UpperCAmelCase ):
_A = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
_A = self.post_quant_conv(_UpperCAmelCase )
_A = self.decoder(_UpperCAmelCase )
row.append(_UpperCAmelCase )
rows.append(_UpperCAmelCase )
_A = []
for i, row in enumerate(_UpperCAmelCase ):
_A = []
for j, tile in enumerate(_UpperCAmelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_A = self.blend_v(rows[i - 1][j] , _UpperCAmelCase , _UpperCAmelCase )
if j > 0:
_A = self.blend_h(row[j - 1] , _UpperCAmelCase , _UpperCAmelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCAmelCase , dim=3 ) )
_A = torch.cat(_UpperCAmelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCAmelCase )
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[torch.Generator] = None , ):
_A = sample
_A = self.encode(_UpperCAmelCase ).latent_dist
if sample_posterior:
_A = posterior.sample(generator=_UpperCAmelCase )
else:
_A = posterior.mode()
_A = self.decode(_UpperCAmelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCAmelCase )
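# --- Illustrative usage sketch (not part of the original module) ---
# How the tiling path above is reached in practice, assuming the `diffusers`
# package is installed; the checkpoint id is a common public VAE, not taken
# from this file, and the 1024x1024 input is chosen to exceed the tile size.
if __name__ == "__main__":
    import torch
    from diffusers import AutoencoderKL

    vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
    vae.enable_tiling()  # sets use_tiling, so large inputs go through tiled_encode/tiled_decode

    image = torch.randn(1, 3, 1024, 1024)  # larger than tile_sample_min_size
    with torch.no_grad():
        posterior = vae.encode(image).latent_dist
        latents = posterior.sample()
        reconstruction = vae.decode(latents).sample
    print(reconstruction.shape)  # torch.Size([1, 3, 1024, 1024])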
| 271
| 0
|
def bead_sort(sequence: list) -> list:
    """
    Bead (gravity) sort; only works for sequences of non-negative integers.
    """
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
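# --- Worked note (not in the original module) ---
# Each pass compares adjacent "rods" and moves the surplus beads right, so the
# largest value bubbles to the end: [5, 4, 3, 2, 1] becomes [4, 3, 2, 1, 5]
# after one pass, [3, 2, 1, 4, 5] after two, and is fully sorted after four of
# the len(sequence) passes.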
| 212
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class AcceleratedScheduler:
    """
    Wraps a learning rate scheduler so it only steps when the wrapped optimizer(s)
    actually performed a training step (e.g. not on skipped fp16 overflow steps).
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
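# --- Illustrative usage sketch (not part of the original module) ---
# In practice this wrapper is created for you by `Accelerator.prepare`; wiring
# it up looks roughly like this (the model and optimizer are hypothetical):
if __name__ == "__main__":
    import torch
    from accelerate import Accelerator

    accelerator = Accelerator()
    model = torch.nn.Linear(8, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)

    # prepare() returns a wrapped scheduler that only steps when the optimizer
    # actually stepped (e.g. not on skipped mixed-precision overflow steps)
    model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
    for _ in range(3):
        optimizer.step()
        scheduler.step()
    print(scheduler.get_last_lr())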
| 313
| 0
|
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    # Disjoint Set Node to store the parent and rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    # Disjoint Set data structure
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation (union by rank)
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # collect each undirected edge once and sort by weight
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
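# --- Illustrative usage example (not part of the original module) ---
# Build a small weighted triangle and extract its minimum spanning tree; the
# MST keeps edges (a-b, 1) and (b-c, 2) for a total weight of 3.
if __name__ == "__main__":
    g = GraphUndirectedWeighted[str]()
    g.add_edge("a", "b", 1)
    g.add_edge("b", "c", 2)
    g.add_edge("a", "c", 3)
    mst = g.kruskal()
    print(mst.connections)  # {'a': {'b': 1}, 'b': {'a': 1, 'c': 2}, 'c': {'b': 2}}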
| 200
|
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 200
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_case_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
'''tokenization_roformer''': ['''RoFormerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
'''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoFormerForCausalLM''',
'''RoFormerForMaskedLM''',
'''RoFormerForMultipleChoice''',
'''RoFormerForQuestionAnswering''',
'''RoFormerForSequenceClassification''',
'''RoFormerForTokenClassification''',
'''RoFormerLayer''',
'''RoFormerModel''',
'''RoFormerPreTrainedModel''',
'''load_tf_weights_in_roformer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
'''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRoFormerForCausalLM''',
'''TFRoFormerForMaskedLM''',
'''TFRoFormerForMultipleChoice''',
'''TFRoFormerForQuestionAnswering''',
'''TFRoFormerForSequenceClassification''',
'''TFRoFormerForTokenClassification''',
'''TFRoFormerLayer''',
'''TFRoFormerModel''',
'''TFRoFormerPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
'''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxRoFormerForMaskedLM''',
'''FlaxRoFormerForMultipleChoice''',
'''FlaxRoFormerForQuestionAnswering''',
'''FlaxRoFormerForSequenceClassification''',
'''FlaxRoFormerForTokenClassification''',
'''FlaxRoFormerModel''',
'''FlaxRoFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
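
# Illustration of what the lazy structure above buys (not part of the original
# file): importing the package is cheap, and the heavy framework-specific
# modules are only imported on first attribute access.
#
#   from transformers.models import roformer   # fast: no torch/tf/flax import yet
#   roformer.RoFormerModel                     # first access triggers the real
#                                              # `modeling_roformer` import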
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase : Optional[Any] = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[str] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : int = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
"MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"MBartForCausalLM",
"MBartForConditionalGeneration",
"MBartForQuestionAnswering",
"MBartForSequenceClassification",
"MBartModel",
"MBartPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
"TFMBartForConditionalGeneration",
"TFMBartModel",
"TFMBartPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
"FlaxMBartForConditionalGeneration",
"FlaxMBartForQuestionAnswering",
"FlaxMBartForSequenceClassification",
"FlaxMBartModel",
"FlaxMBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
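
# Worked trace for the demo below (strings taken from this file): "c*a*b"
# matches "aab" because "c*" can match the empty string, "a*" absorbs "aa",
# and the trailing "b" matches the final "b".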
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
_lowerCamelCase : Any = "aab"
_lowerCamelCase : List[str] = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F'''{input_string} matches the given pattern {pattern}''')
else:
print(F'''{input_string} does not match with the given pattern {pattern}''')
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
):
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector
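
# Minimal usage sketch (the matrix and starting vector here are illustrative,
# not from the original module):
#
#   a = np.array([[2.0, 0.0], [0.0, 1.0]])
#   v = np.array([1.0, 1.0])
#   eigen_value, eigen_vector = power_iteration(a, v)
#   # eigen_value -> approximately 2.0, the dominant eigenvalue of a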
def test_power_iteration():
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
@slow
def __lowerCamelCase ( self ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
lowercase : Optional[Any] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(SCREAMING_SNAKE_CASE__ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
lowercase : str = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
            self.assertIsInstance(SCREAMING_SNAKE_CASE__ , (GPT2Tokenizer, GPT2TokenizerFast) )
self.assertGreater(len(SCREAMING_SNAKE_CASE__ ) , 0 )
def __lowerCamelCase ( self ):
lowercase : Tuple = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def __lowerCamelCase ( self ):
lowercase : Any = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def __lowerCamelCase ( self ):
lowercase : Union[str, Any] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Check that tokenizer_type ≠ model_type
lowercase : Optional[int] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , config=SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def __lowerCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(SCREAMING_SNAKE_CASE__ , '''vocab.txt''' ) )
lowercase : Tuple = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , tokenizer_type='''bert''' , use_fast=SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(SCREAMING_SNAKE_CASE__ , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(SCREAMING_SNAKE_CASE__ , '''merges.txt''' ) )
lowercase : Any = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , tokenizer_type='''gpt2''' , use_fast=SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@require_tokenizers
def __lowerCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(SCREAMING_SNAKE_CASE__ , '''vocab.txt''' ) )
lowercase : Dict = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , tokenizer_type='''bert''' )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(SCREAMING_SNAKE_CASE__ , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(SCREAMING_SNAKE_CASE__ , '''merges.txt''' ) )
lowercase : int = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , tokenizer_type='''gpt2''' )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def __lowerCamelCase ( self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
lowercase : Union[str, Any] = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , (BertTokenizer, BertTokenizerFast) )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , SCREAMING_SNAKE_CASE__ )
else:
self.assertEqual(tokenizer.do_lower_case , SCREAMING_SNAKE_CASE__ )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def __lowerCamelCase ( self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE__ , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
lowercase : str = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def __lowerCamelCase ( self ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
lowercase : Any = TOKENIZER_MAPPING.values()
lowercase : Tuple = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(SCREAMING_SNAKE_CASE__ )
@require_tokenizers
def __lowerCamelCase ( self ):
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , SCREAMING_SNAKE_CASE__ )
@require_tokenizers
def __lowerCamelCase ( self ):
lowercase : Optional[Any] = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = '''Hello, world. How are you?'''
lowercase : Any = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
self.assertEqual('''[UNK]''' , tokens[0] )
lowercase : Optional[Any] = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def __lowerCamelCase ( self ):
lowercase : int = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30000 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def __lowerCamelCase ( self ):
lowercase : Tuple = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowercase : Any = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def __lowerCamelCase ( self ):
lowercase : Union[str, Any] = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
# Check we can load the tokenizer config of an online model.
lowercase : Optional[Any] = get_tokenizer_config('''bert-base-cased''' )
lowercase : str = config.pop('''_commit_hash''' , SCREAMING_SNAKE_CASE__ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(SCREAMING_SNAKE_CASE__ , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
lowercase : Union[str, Any] = get_tokenizer_config(SCREAMING_SNAKE_CASE__ )
self.assertDictEqual(SCREAMING_SNAKE_CASE__ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
lowercase : str = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = get_tokenizer_config(SCREAMING_SNAKE_CASE__ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def __lowerCamelCase ( self ):
try:
AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE__ )
AutoTokenizer.register(SCREAMING_SNAKE_CASE__ , slow_tokenizer_class=SCREAMING_SNAKE_CASE__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
AutoTokenizer.register(SCREAMING_SNAKE_CASE__ , slow_tokenizer_class=SCREAMING_SNAKE_CASE__ )
lowercase : int = CustomTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowercase : str = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def __lowerCamelCase ( self ):
try:
AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE__ )
# Can register in two steps
AutoTokenizer.register(SCREAMING_SNAKE_CASE__ , slow_tokenizer_class=SCREAMING_SNAKE_CASE__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(SCREAMING_SNAKE_CASE__ , fast_tokenizer_class=SCREAMING_SNAKE_CASE__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
SCREAMING_SNAKE_CASE__ , slow_tokenizer_class=SCREAMING_SNAKE_CASE__ , fast_tokenizer_class=SCREAMING_SNAKE_CASE__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
AutoTokenizer.register(SCREAMING_SNAKE_CASE__ , fast_tokenizer_class=SCREAMING_SNAKE_CASE__ )
            # We pass through a BERT tokenizer fast because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase : Union[str, Any] = BertTokenizerFast.from_pretrained(SCREAMING_SNAKE_CASE__ )
bert_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = CustomTokenizerFast.from_pretrained(SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __lowerCamelCase ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
lowercase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
lowercase : str = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=SCREAMING_SNAKE_CASE__ )
lowercase : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=SCREAMING_SNAKE_CASE__ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , trust_remote_code=SCREAMING_SNAKE_CASE__ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowercase : int = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , trust_remote_code=SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def __lowerCamelCase ( self ):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False
try:
AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE__ )
AutoTokenizer.register(SCREAMING_SNAKE_CASE__ , slow_tokenizer_class=SCREAMING_SNAKE_CASE__ )
AutoTokenizer.register(SCREAMING_SNAKE_CASE__ , fast_tokenizer_class=SCREAMING_SNAKE_CASE__ )
# If remote code is not set, the default is to use local
lowercase : Union[str, Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
lowercase : Union[str, Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=SCREAMING_SNAKE_CASE__ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
lowercase : Tuple = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=SCREAMING_SNAKE_CASE__ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
lowercase : List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
lowercase : Any = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=SCREAMING_SNAKE_CASE__ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
lowercase : List[Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __lowerCamelCase ( self ):
lowercase : Dict = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=SCREAMING_SNAKE_CASE__ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowercase : int = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __lowerCamelCase ( self ):
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE__ , '''bert-base is not a local folder and is not a valid model identifier''' ):
lowercase : List[Any] = AutoTokenizer.from_pretrained('''bert-base''' )
def __lowerCamelCase ( self ):
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE__ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
lowercase : Optional[int] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , revision='''aaaaaa''' )
def __lowerCamelCase ( self ):
# Make sure we have cached the tokenizer.
lowercase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
lowercase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device("cpu")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])

    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])

    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])

    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    item = dct.pop(old)
    dct[new] = item
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]

    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]

    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]

    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
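
# Example invocation (the script file name and paths are illustrative):
#
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt /path/to/swiftformer_xs.pth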
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
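
# Minimal usage sketch (illustrative; assumes a hosted vocab such as the
# "alibaba-damo/mgp-str-base" checkpoint referenced above):
#
#   tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
#   tokenizer._tokenize("hello")  # -> ["h", "e", "l", "l", "o"] (per-character tokens)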
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Log and save metrics for the given split."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def lowercase_ ( ) -> Tuple:
"""simple docstring"""
snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case , snake_case , snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case , snake_case , snake_case = parser.parse_args_into_dataclasses()
check_output_dir(A__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , A__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
snake_case = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(A__ , A__ , A__ ):
assert hasattr(A__ , A__ ), F'({config.__class__.__name__}) doesn\'t have a `{p}` attribute'
setattr(A__ , A__ , getattr(A__ , A__ ) )
snake_case = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
snake_case = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=A__ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(A__ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
snake_case = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(A__ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(A__ , A__ ):
snake_case = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
snake_case = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(A__ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)
if training_args.do_predict:
logger.info("*** Predict ***" )
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))
return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs); `index` is the process index and is unused here.
    main()
if __name__ == "__main__":
main()
def solution(n: int = 1000) -> int:
    """simple docstring"""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
"""simple docstring"""
def merge_sort(collection: list) -> list:
    """Sort a list using merge sort.

    >>> merge_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> merge_sort([])
    []
    >>> merge_sort([-2, -5, -45])
    [-45, -5, -2]
    """

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
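

# Added sanity helper (not part of the original file): cross-checks merge_sort
# against Python's built-in sorted() on random inputs; call it manually if needed.
def _check_merge_sort(trials: int = 100) -> None:
    import random

    for _ in range(trials):
        data = [random.randint(-100, 100) for _ in range(random.randint(0, 20))]
        assert merge_sort(data) == sorted(data)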
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load the configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
F''' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}''' )
# Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)
# Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
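

# Note (added): `load_entity_vocab` below expects a tab-separated file with one
# entity per line, e.g. "[MASK]\t12345"; only the title column is kept, and the
# line index becomes the entity id.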
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index

    return entity_vocab
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)

                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
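

# Added usage sketch (not part of the original file): how the patience-based
# early exit is typically driven at inference time, assuming a trained
# `BertForSequenceClassificationWithPabee` instance called `model` and an
# `input_ids` batch tensor:
#
#     model.bert.set_patience(3)   # exit once 3 consecutive classifiers agree
#     model.bert.reset_stats()
#     with torch.no_grad():
#         logits = model(input_ids=input_ids)[0]
#     model.bert.log_stats()       # prints the average number of layers used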
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
    def create_and_check_base_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))
    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
import math
def is_prime(number: int) -> bool:
    """Checks whether a number is prime.

    >>> is_prime(2)
    True
    >>> is_prime(15)
    False
    >>> is_prime(29)
    True
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Returns the side length of the square spiral at which the ratio of primes
    along both diagonals first falls below the given ratio.

    >>> solution(0.5)
    11
    """
    primes = 3
    j = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
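

# Worked example (added): for side length j, the three new diagonal corners below
# the perfect square (j + 2) ** 2 are (j + 2) ** 2 - k * (j + 1) for k = 1, 2, 3,
# which is exactly what range(j * j + j + 1, (j + 2) * (j + 2), j + 1) enumerates:
# for j = 3 it yields 13, 17, 21, the non-square corners of the 5x5 spiral ring.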
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize(self, text: str):
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings of sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPE
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
import math
def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using math.sqrt.

    >>> perfect_square(9)
    True
    >>> perfect_square(16)
    True
    >>> perfect_square(11)
    False
    """
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search.

    >>> perfect_square_binary_search(9)
    True
    >>> perfect_square_binary_search(16)
    True
    >>> perfect_square_binary_search(11)
    False
    """
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
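

# Added note: the two implementations should agree; a quick cross-check such as
#     all(perfect_square(i) == perfect_square_binary_search(i) for i in range(100))
# is expected to evaluate to True.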
if __name__ == "__main__":
import doctest
doctest.testmod()
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import (
        AutoProcessor,
        BertTokenizerFast,
        BlipImageProcessor,
        GPT2Tokenizer,
        InstructBlipProcessor,
        PreTrainedTokenizerFast,
    )
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random NumPy arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False


try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)

                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]
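
    # Example (added; hypothetical key): setattr(config, "model.roi_heads.nms", 0.5)
    # walks the dotted key through the nested Config objects, updating both the
    # attribute on each level and the underlying `_pointer` dict entry.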
    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = " "
        if self._name != "root":
            r = f"{t * (self._level - 1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
        self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError

            config_file = Config.load_yaml(resolved_config_file)

        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id, filename, use_cdn=True):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
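

# Example (added): hf_bucket_url("bert-base-uncased", "config.yaml", use_cdn=False)
# returns "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.yaml"
# (the legacy flat layout, used because the model id contains no "/").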
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )

            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
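# Example: the cache filename is a pure function of the url (and the etag, when
# the server sent one), so the same resource always maps to the same cache entry:
#
#   url_to_filename("https://example.com/model.bin")           # -> "<sha256(url)>"
#   url_to_filename("https://example.com/model.bin", '"abc"')  # -> "<sha256(url)>.<sha256(etag)>"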
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
        return output_path_extracted
    return output_path
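# A minimal usage sketch (the url is hypothetical; the first call downloads,
# later calls are served from TRANSFORMERS_CACHE while the ETag is unchanged):
#
#   local = cached_path("https://example.com/files/config.yaml")
#   assert local == cached_path("https://example.com/files/config.yaml")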
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase="," ):
assert isinstance(_UpperCamelCase , _UpperCamelCase )
if os.path.isfile(_UpperCamelCase ):
with open(_UpperCamelCase ) as f:
__lowerCAmelCase : Optional[int] = eval(f.read() )
else:
__lowerCAmelCase : str = requests.get(_UpperCamelCase )
try:
__lowerCAmelCase : Optional[int] = requests.json()
except Exception:
__lowerCAmelCase : Dict = req.content.decode()
assert data is not None, "could not connect"
try:
__lowerCAmelCase : str = eval(_UpperCamelCase )
except Exception:
__lowerCAmelCase : List[Any] = data.split('\n' )
req.close()
return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_checkpoint(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new
def get_demo_path():
    # PATH is the module-level directory constant defined at the top of this file
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase="RGB" ):
assert isinstance(_UpperCamelCase , _UpperCamelCase )
if os.path.isfile(_UpperCamelCase ):
__lowerCAmelCase : List[str] = cva.imread(_UpperCamelCase )
else:
__lowerCAmelCase : str = get_image_from_url(_UpperCamelCase )
assert img is not None, F"could not connect to: {im}"
__lowerCAmelCase : Dict = cva.cvtColor(_UpperCamelCase , cva.COLOR_BGR2RGB )
if input_format == "RGB":
__lowerCAmelCase : Tuple = img[:, :, ::-1]
return img
def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
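# Example: batching three images two at a time (names are hypothetical):
#
#   for batch_imgs in chunk([img_a, img_b, img_c], batch=2):
#       ...  # first yields [img_a, img_b], then [img_c]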
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
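# Shape sketch: for a 514x514 RGB PIL image, the size is rounded down to
# 512x512 (the nearest multiple of 32), the array is scaled to [0, 1],
# reordered to NCHW, and mapped to [-1, 1], giving a float32 tensor of
# shape (1, 3, 512, 512).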
class LDMSuperResolutionPipeline(DiffusionPipeline):
    """Latent-diffusion super-resolution: the low-resolution image is concatenated to the latents at every step."""

    def __init__(self, vqvae, unet, scheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        image=None,
        batch_size=1,
        num_inference_steps=100,
        eta=0.0,
        generator=None,
        output_type="pil",
        return_dict=True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")
        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)
        height, width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
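# A minimal usage sketch, assuming the public CompVis super-resolution
# checkpoint and a CUDA device (both are assumptions, not part of this file):
#
#   from PIL import Image
#
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   pipe = pipe.to("cuda")
#   low_res = Image.open("input.png").convert("RGB").resize((128, 128))
#   upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
#   upscaled.save("upscaled.png")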
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
>>> pipe.to("cuda")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save("cat.png")
```
'''
def get_new_h_w(h, w, scale_factor=8):
    """Map a requested (h, w) to the latent grid size the MoVQ decoder expects."""
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
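# Worked example with the default scale_factor of 8 (so 64 pixels per latent
# cell): a 768x768 request maps to a 96x96 latent grid (768 // 64 = 12,
# 12 * 8 = 96), while 700x700 rounds up to 88x88 (700 // 64 = 10 with a
# remainder, so 11 * 8 = 88).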
class KandinskyPipeline(DiffusionPipeline):
    """Text-to-image pipeline for Kandinsky 2.1 (see EXAMPLE_DOC_STRING above for usage)."""

    def __init__(self, text_encoder, tokenizer, unet, scheduler, movq):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
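    # Note: for DDPM-style schedulers init_noise_sigma is 1.0, so freshly drawn
    # latents are used as-is; schedulers with larger initial sigmas rely on this
    # multiplication to match the noise variance they expect at the first step.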
    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            truncation=True,
            max_length=77,
            return_attention_mask=True,
            add_special_tokens=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)
        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask
        )
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=77,
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)
            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
            )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)
            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
            # done duplicates
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])
        return prompt_embeds, text_encoder_hidden_states, text_mask
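    # Note: the unconditional embeddings are concatenated *before* the text
    # embeddings, so the `.chunk(2)` on the UNet output in `__call__` yields
    # (unconditional, text) in that order.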
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # this pipeline registers no safety checker; the getattr keeps the copied block a no-op here
        if getattr(self, "safety_checker", None) is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt,
        image_embeds,
        negative_image_embeds,
        negative_prompt=None,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device
            )
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = get_new_h_w(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator).prev_sample
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """Holds the mean and standard deviation used to normalize CLIP image embeddings."""

    @register_to_config
    def __init__(self, embedding_dim=768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device=None, torch_dtype=None):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
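# A minimal round-trip sketch (shapes assumed; with the default zero mean and
# unit std the transform is the identity until trained statistics are loaded):
#
#   normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
#   embeds = torch.randn(4, 768)
#   restored = normalizer.unscale(normalizer.scale(embeds))
#   assert torch.allclose(embeds, restored, atol=1e-5)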