| code (string, lengths 86 to 54.5k) | code_codestyle (int64, 0 to 371) | style_context (string, lengths 87 to 49.2k) | style_context_codestyle (int64, 0 to 349) | label (int64, 0 to 1) |
|---|---|---|---|---|
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    # NOTE: identifiers in this row were obfuscated in the source; the method
    # and variable names below are plausible reconstructions, not the originals.
    def setUp(self) -> None:
        '''simple docstring'''
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self) -> None:
        '''simple docstring'''
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self) -> None:
        '''simple docstring'''
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self) -> None:
        '''simple docstring'''
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self) -> None:
        '''simple docstring'''
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self) -> None:
        '''simple docstring'''
        raw_story = ''
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_highlights(self) -> None:
        '''simple docstring'''
        raw_story = (
            'It was the year of Our Lord one thousand seven hundred and '
            'seventy-five\n\nSpiritual revelations were conceded to England '
            'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
        )
        story_lines, summary_lines = process_story(raw_story)
        expected_story_lines = [
            'It was the year of Our Lord one thousand seven hundred and seventy-five.',
            'Spiritual revelations were conceded to England at that favoured period, as at this.',
        ]
        self.assertEqual(expected_story_lines, story_lines)
        expected_summary_lines = ['It was the best of times.']
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self) -> None:
        '''simple docstring'''
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self) -> None:
        '''simple docstring'''
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self) -> None:
        '''simple docstring'''
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self) -> None:
        '''simple docstring'''
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
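# A minimal behavioral sketch of the helpers exercised above. These are
# hypothetical reimplementations written for illustration; the real
# implementations live in utils_summarization and may differ in detail.
def _truncate_or_pad_sketch(sequence, block_size, pad_token_id):
    # truncate to block_size, or right-pad with pad_token_id up to block_size
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))


def _build_mask_sketch(sequence, pad_token_id):
    # 1 for real tokens, 0 wherever the padding token appears
    mask = torch.ones_like(sequence)
    mask[sequence == pad_token_id] = 0
    return mask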
| 164 |
"""simple docstring"""
def naive_cut_rod_recursive(n: int, prices: list) -> int:
    '''simple docstring'''
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float('-inf')
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))
    return max_revenue


def top_down_cut_rod(n: int, prices: list) -> int:
    '''simple docstring'''
    _enforce_args(n, prices)
    max_rev = [float('-inf') for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list) -> int:
    '''simple docstring'''
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float('-inf')
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list) -> int:
    '''simple docstring'''
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod of
    # length 0.
    max_rev = [float('-inf') for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list) -> None:
    '''simple docstring'''
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main() -> None:
    '''simple docstring'''
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
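# A hypothetical usage sketch (not part of the original module): all three
# implementations realize the recurrence r(n) = max over 1 <= i <= n of
# (prices[i - 1] + r(n - i)), i.e. try every possible length for the first cut.
def _demo_cut_rod() -> None:
    prices = [1, 5, 8, 9]  # illustrative price table
    # for n = 4 the optimum is two pieces of length 2: 5 + 5 = 10
    assert bottom_up_cut_rod(4, prices) == 10
    assert top_down_cut_rod(4, prices) == naive_cut_rod_recursive(4, prices) == 10


_demo_cut_rod()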
| 179 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any], _UpperCAmelCase : Tuple, _UpperCAmelCase : List[Any]=2, _UpperCAmelCase : List[Any]=True, _UpperCAmelCase : Union[str, Any]=False, _UpperCAmelCase : List[str]=1_0, _UpperCAmelCase : Any=3, _UpperCAmelCase : Tuple=3_2 * 4, _UpperCAmelCase : List[str]=3_2 * 6, _UpperCAmelCase : List[str]=4, _UpperCAmelCase : Any=3_2, ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = parent
SCREAMING_SNAKE_CASE__ : List[Any] = batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = is_training
SCREAMING_SNAKE_CASE__ : Optional[int] = use_auxiliary_loss
SCREAMING_SNAKE_CASE__ : str = num_queries
SCREAMING_SNAKE_CASE__ : str = num_channels
SCREAMING_SNAKE_CASE__ : Optional[int] = min_size
SCREAMING_SNAKE_CASE__ : Dict = max_size
SCREAMING_SNAKE_CASE__ : Optional[int] = num_labels
SCREAMING_SNAKE_CASE__ : Dict = mask_feature_size
def A_ ( self : Any ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size], device=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=_UpperCAmelCase ) > 0.5
).float()
SCREAMING_SNAKE_CASE__ : List[Any] = (torch.rand((self.batch_size, self.num_labels), device=_UpperCAmelCase ) > 0.5).long()
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def A_ ( self : List[str] ) -> List[str]:
"""simple docstring"""
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1], ), decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8, num_queries=self.num_queries, decoder_attention_heads=2, d_model=self.mask_feature_size, ), mask_feature_size=self.mask_feature_size, fpn_feature_size=self.mask_feature_size, num_channels=self.num_channels, num_labels=self.num_labels, )
def A_ ( self : Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : int = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def A_ ( self : Union[str, Any], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = output.encoder_hidden_states
SCREAMING_SNAKE_CASE__ : Union[str, Any] = output.pixel_decoder_hidden_states
SCREAMING_SNAKE_CASE__ : Any = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_UpperCAmelCase ), len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_UpperCAmelCase ), len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_UpperCAmelCase ), config.decoder_config.decoder_layers )
def A_ ( self : int, _UpperCAmelCase : Tuple, _UpperCAmelCase : str, _UpperCAmelCase : Optional[int], _UpperCAmelCase : Optional[int]=False ) -> Tuple:
"""simple docstring"""
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = MaskFormerModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : str = model(pixel_values=_UpperCAmelCase, pixel_mask=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = model(_UpperCAmelCase, output_hidden_states=_UpperCAmelCase )
# the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.mask_feature_size), )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_UpperCAmelCase, _UpperCAmelCase )
def A_ ( self : Optional[int], _UpperCAmelCase : Any, _UpperCAmelCase : List[str], _UpperCAmelCase : int, _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : int ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = MaskFormerForInstanceSegmentation(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
def comm_check_on_output(_UpperCAmelCase : Optional[int] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple = model(pixel_values=_UpperCAmelCase, pixel_mask=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = model(_UpperCAmelCase )
comm_check_on_output(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(
pixel_values=_UpperCAmelCase, pixel_mask=_UpperCAmelCase, mask_labels=_UpperCAmelCase, class_labels=_UpperCAmelCase )
comm_check_on_output(_UpperCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape, torch.Size([1] ) )
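# Shape arithmetic exercised above (illustrative numbers, derived from the
# tester defaults): with min_size = 32 * 4 and max_size = 32 * 6,
# masks_queries_logits has shape (batch_size, num_queries, 128 // 4, 192 // 4)
# = (2, 10, 32, 48), because the encoder downsamples masks by a factor of 4;
# class_queries_logits has shape (batch_size, num_queries, num_labels + 1)
# = (2, 10, 5), the "+ 1" being the null (no-object) class.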
@require_torch
class lowerCamelCase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
UpperCAmelCase_ = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
def A_ ( self : Any ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = MaskFormerModelTester(self )
SCREAMING_SNAKE_CASE__ : Dict = ConfigTester(self, config_class=_UpperCAmelCase, has_text_modality=_UpperCAmelCase )
def A_ ( self : List[str] ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def A_ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_UpperCAmelCase, **_UpperCAmelCase, output_hidden_states=_UpperCAmelCase )
def A_ ( self : str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_UpperCAmelCase )
@unittest.skip(reason="MaskFormer does not use inputs_embeds" )
def A_ ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
def A_ ( self : int ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormer is not a generative model" )
def A_ ( self : str ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormer does not use token embeddings" )
def A_ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def A_ ( self : Dict ) -> Dict:
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A_ ( self : Tuple ) -> Any:
"""simple docstring"""
pass
def A_ ( self : List[str] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : int = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : Any = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1], _UpperCAmelCase )
@slow
def A_ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
for model_name in ["facebook/maskformer-swin-small-coco"]:
SCREAMING_SNAKE_CASE__ : str = MaskFormerModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def A_ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = (self.model_tester.min_size,) * 2
SCREAMING_SNAKE_CASE__ : List[str] = {
"pixel_values": torch.randn((2, 3, *size), device=_UpperCAmelCase ),
"mask_labels": torch.randn((2, 1_0, *size), device=_UpperCAmelCase ),
"class_labels": torch.zeros(2, 1_0, device=_UpperCAmelCase ).long(),
}
SCREAMING_SNAKE_CASE__ : int = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = model(**_UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
def A_ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_UpperCAmelCase, **_UpperCAmelCase, output_hidden_states=_UpperCAmelCase )
def A_ ( self : Any ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_class(_UpperCAmelCase ).to(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : int = model(**_UpperCAmelCase, output_attentions=_UpperCAmelCase )
self.assertTrue(outputs.attentions is not None )
def A_ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
SCREAMING_SNAKE_CASE__ : str = self.all_model_classes[1]
SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : Any = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
SCREAMING_SNAKE_CASE__ : int = model(_UpperCAmelCase, mask_labels=_UpperCAmelCase, class_labels=_UpperCAmelCase ).loss
loss.backward()
def A_ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.all_model_classes[1]
SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : List[str] = True
SCREAMING_SNAKE_CASE__ : List[Any] = True
SCREAMING_SNAKE_CASE__ : int = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_UpperCAmelCase, mask_labels=_UpperCAmelCase, class_labels=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE__ : Tuple = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
SCREAMING_SNAKE_CASE__ : Optional[int] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE__ : int = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_UpperCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_lowerCamelCase : int = 1e-4
def _a ( ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
@cached_property
def A_ ( self : int ) -> Dict:
"""simple docstring"""
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
if is_vision_available()
else None
)
def A_ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : List[str] = prepare_img()
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processor(_UpperCAmelCase, return_tensors="pt" ).to(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(_UpperCAmelCase, (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Any = model(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : int = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3], _UpperCAmelCase, atol=_UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], _UpperCAmelCase, atol=_UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3], _UpperCAmelCase, atol=_UpperCAmelCase ) )
def A_ ( self : int ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(_UpperCAmelCase )
.eval()
)
SCREAMING_SNAKE_CASE__ : Tuple = self.default_image_processor
SCREAMING_SNAKE_CASE__ : int = prepare_img()
SCREAMING_SNAKE_CASE__ : List[Any] = image_processor(_UpperCAmelCase, return_tensors="pt" ).to(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : int = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(_UpperCAmelCase, (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = model(**_UpperCAmelCase )
# masks_queries_logits
SCREAMING_SNAKE_CASE__ : List[str] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape, (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4), )
SCREAMING_SNAKE_CASE__ : Dict = [
[-1.3737124, -1.7724937, -1.9364233],
[-1.5977281, -1.9867939, -2.1523695],
[-1.5795398, -1.9269832, -2.093942],
]
SCREAMING_SNAKE_CASE__ : Any = torch.tensor(_UpperCAmelCase ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], _UpperCAmelCase, atol=_UpperCAmelCase ) )
# class_queries_logits
SCREAMING_SNAKE_CASE__ : Any = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor(
[
[1.6512E00, -5.2572E00, -3.3519E00],
[3.6169E-02, -5.9025E00, -2.9313E00],
[1.0766E-04, -7.7630E00, -5.1263E00],
] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], _UpperCAmelCase, atol=_UpperCAmelCase ) )
def A_ ( self : Dict ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
.to(_UpperCAmelCase )
.eval()
)
SCREAMING_SNAKE_CASE__ : int = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : str = image_processor(_UpperCAmelCase, return_tensors="pt" ).to(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(_UpperCAmelCase, (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Dict = model(**_UpperCAmelCase )
# masks_queries_logits
SCREAMING_SNAKE_CASE__ : Tuple = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape, (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4), )
SCREAMING_SNAKE_CASE__ : List[Any] = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor(_UpperCAmelCase ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], _UpperCAmelCase, atol=_UpperCAmelCase ) )
# class_queries_logits
SCREAMING_SNAKE_CASE__ : int = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE__ : int = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], _UpperCAmelCase, atol=_UpperCAmelCase ) )
def A_ ( self : Any ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(_UpperCAmelCase )
.eval()
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )], segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.float32 ), np.zeros((3_8_4, 3_8_4) ).astype(np.float32 )], return_tensors="pt", )
SCREAMING_SNAKE_CASE__ : int = inputs["pixel_values"].to(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : int = [el.to(_UpperCAmelCase ) for el in inputs["mask_labels"]]
SCREAMING_SNAKE_CASE__ : Tuple = [el.to(_UpperCAmelCase ) for el in inputs["class_labels"]]
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[str] = model(**_UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
| 371 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
_lowerCamelCase : Any = f"https://www.google.com/search?q={query}&num=100"
_lowerCamelCase : Optional[Any] = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
_lowerCamelCase : Union[str, Any] = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
_lowerCamelCase : Optional[Any] = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
| 191 | 0 |
from __future__ import annotations
from math import pow, sqrt
# NOTE: the original function name was obfuscated; "electrical_impedance" is a
# reconstruction based on the body, which solves Z**2 = R**2 + X**2 for the
# missing quantity.
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    '''simple docstring'''
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
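# Worked example (illustrative values, following directly from Z**2 = R**2 + X**2):
# a resistance of 3 ohm and a reactance of 4 ohm give an impedance of 5 ohm,
# and knowing Z = 5 with X = 4 recovers R = 3.
assert electrical_impedance(3, 4, 0) == {"impedance": 5.0}
assert electrical_impedance(0, 4, 5) == {"resistance": 3.0}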
| 252 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a__ ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
_a : str = StableUnCLIPPipeline
_a : Union[str, Any] = TEXT_TO_IMAGE_PARAMS
_a : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
_a : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
_a : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_a : Optional[Any] = False
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
embedder_hidden_size = 3_2  # reconstructed names; the assignment targets were obfuscated
embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=_A , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
torch.manual_seed(0 )
__lowerCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=1_2 , embedding_dim=_A , num_layers=1 , )
torch.manual_seed(0 )
__lowerCAmelCase = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=_A , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
__lowerCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=_A )
__lowerCAmelCase = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_A , layers_per_block=1 , upcast_attention=_A , use_linear_projection=_A , )
torch.manual_seed(0 )
__lowerCAmelCase = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="v_prediction" , set_alpha_to_one=_A , steps_offset=1 , )
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL()
__lowerCAmelCase = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def __SCREAMING_SNAKE_CASE( self , _A , _A=0 ):
"""simple docstring"""
if str(_A ).startswith("mps" ):
__lowerCAmelCase = torch.manual_seed(_A )
else:
__lowerCAmelCase = torch.Generator(device=_A ).manual_seed(_A )
__lowerCAmelCase = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=_A )
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
__lowerCAmelCase = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.float16 )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowerCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
__lowerCAmelCase = pipe("anime turle" , generator=_A , output_type="np" )
__lowerCAmelCase = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCAmelCase = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.float16 )
__lowerCAmelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowerCAmelCase = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
__lowerCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 1_0**9
| 92 | 0 |
"""simple docstring"""
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError('All input parameters must be positive')
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError('Relative densities cannot be greater than one')
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
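# Sanity check added for illustration (not in the original script): at
# redshift 0 the bracketed density term sums to 1 by construction, since the
# curvature term absorbs any excess, so H(0) reduces to the Hubble constant.
import math

assert math.isclose(
    hubble_parameter(
        hubble_constant=68.3,
        radiation_density=1e-4,
        matter_density=0.3,
        dark_energy=0.7,
        redshift=0,
    ),
    68.3,
)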
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
| 12 |
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowercase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = ["""pixel_values"""]
def __init__( self , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = 8 , **lowercase , ):
super().__init__(**lowercase )
_lowerCamelCase : Optional[Any] = do_rescale
_lowerCamelCase : Union[str, Any] = rescale_factor
_lowerCamelCase : Any = do_pad
_lowerCamelCase : Optional[int] = pad_size
def A_ ( self , lowercase , lowercase , lowercase = None , **lowercase ):
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def A_ ( self , lowercase , lowercase , lowercase = None ):
_lowerCamelCase, _lowerCamelCase : Tuple = get_image_size(lowercase )
_lowerCamelCase : Union[str, Any] = (old_height // size + 1) * size - old_height
_lowerCamelCase : Tuple = (old_width // size + 1) * size - old_width
return pad(lowercase , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=lowercase )
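    # Worked example of the padding arithmetic above (illustrative numbers):
    # with old_height = 17 and size = 8, pad_height = (17 // 8 + 1) * 8 - 17 = 7,
    # so the padded height becomes 24. Note the formula always pads at least one
    # full block: a side of 16 is padded to 24, not left at 16.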
    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')

        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 12 | 1 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : str ) -> int:
"""simple docstring"""
with open(__magic_name__ ) as metadata_file:
UpperCamelCase :int = json.load(__magic_name__ )
UpperCamelCase :List[Any] = LukeConfig(use_entity_aware_attention=__magic_name__ , **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
UpperCamelCase :List[str] = torch.load(__magic_name__ , map_location="""cpu""" )["""module"""]
# Load the entity vocab file
UpperCamelCase :Optional[int] = load_original_entity_vocab(__magic_name__ )
# add an entry for [MASK2]
UpperCamelCase :Optional[Any] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
UpperCamelCase :Any = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
UpperCamelCase :Union[str, Any] = AddedToken("""<ent>""" , lstrip=__magic_name__ , rstrip=__magic_name__ )
UpperCamelCase :Optional[int] = AddedToken("""<ent2>""" , lstrip=__magic_name__ , rstrip=__magic_name__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(__magic_name__ )
with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """r""" ) as f:
UpperCamelCase :str = json.load(__magic_name__ )
UpperCamelCase :Optional[int] = """MLukeTokenizer"""
with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """w""" ) as f:
json.dump(__magic_name__ , __magic_name__ )
with open(os.path.join(__magic_name__ , MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
json.dump(__magic_name__ , __magic_name__ )
UpperCamelCase :Optional[int] = MLukeTokenizer.from_pretrained(__magic_name__ )
# Initialize the embeddings of the special tokens
UpperCamelCase :str = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
UpperCamelCase :Optional[int] = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
UpperCamelCase :str = state_dict["""embeddings.word_embeddings.weight"""]
UpperCamelCase :int = word_emb[ent_init_index].unsqueeze(0 )
UpperCamelCase :Dict = word_emb[enta_init_index].unsqueeze(0 )
UpperCamelCase :Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
UpperCamelCase :Dict = state_dict[bias_name]
UpperCamelCase :str = decoder_bias[ent_init_index].unsqueeze(0 )
UpperCamelCase :Dict = decoder_bias[enta_init_index].unsqueeze(0 )
UpperCamelCase :Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
UpperCamelCase :str = f"""encoder.layer.{layer_index}.attention.self."""
UpperCamelCase :str = state_dict[prefix + matrix_name]
UpperCamelCase :Any = state_dict[prefix + matrix_name]
UpperCamelCase :List[Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
UpperCamelCase :int = state_dict["""entity_embeddings.entity_embeddings.weight"""]
UpperCamelCase :Dict = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
UpperCamelCase :int = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
UpperCamelCase :Union[str, Any] = state_dict["""entity_predictions.bias"""]
UpperCamelCase :List[Any] = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
UpperCamelCase :Dict = torch.cat([entity_prediction_bias, entity_mask_bias] )
UpperCamelCase :Union[str, Any] = LukeForMaskedLM(config=__magic_name__ ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
UpperCamelCase :int = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
UpperCamelCase :Union[str, Any] = state_dict[key]
else:
UpperCamelCase :Optional[Any] = state_dict[key]
UpperCamelCase , UpperCamelCase :List[Any] = model.load_state_dict(__magic_name__ , strict=__magic_name__ )
if set(__magic_name__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(__magic_name__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
UpperCamelCase :Dict = MLukeTokenizer.from_pretrained(__magic_name__ , task="""entity_classification""" )
UpperCamelCase :Dict = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
UpperCamelCase :Union[str, Any] = (0, 9)
UpperCamelCase :Optional[Any] = tokenizer(__magic_name__ , entity_spans=[span] , return_tensors="""pt""" )
UpperCamelCase :Union[str, Any] = model(**__magic_name__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
UpperCamelCase :Union[str, Any] = torch.Size((1, 33, 768) )
UpperCamelCase :Tuple = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
UpperCamelCase :Union[str, Any] = torch.Size((1, 1, 768) )
UpperCamelCase :Tuple = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
f""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
UpperCamelCase :Dict = MLukeTokenizer.from_pretrained(__magic_name__ )
UpperCamelCase :Optional[int] = """Tokyo is the capital of <mask>."""
UpperCamelCase :Optional[int] = (24, 30)
UpperCamelCase :Optional[Any] = tokenizer(__magic_name__ , entity_spans=[span] , return_tensors="""pt""" )
UpperCamelCase :List[str] = model(**__magic_name__ )
UpperCamelCase :Optional[Any] = encoding["""input_ids"""][0].tolist()
UpperCamelCase :Any = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
UpperCamelCase :str = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__magic_name__ )
UpperCamelCase :Tuple = outputs.entity_logits[0][0].argmax().item()
UpperCamelCase :str = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(__magic_name__ ) )
model.save_pretrained(__magic_name__ )
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
UpperCamelCase :Dict = ["""[MASK]""", """[PAD]""", """[UNK]"""]
UpperCamelCase :Dict = [json.loads(line ) for line in open(__magic_name__ )]
UpperCamelCase :int = {}
for entry in data:
UpperCamelCase :Dict = entry["""id"""]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
UpperCamelCase :int = entity_id
break
UpperCamelCase :Dict = f"""{language}:{entity_name}"""
UpperCamelCase :Optional[int] = entity_id
return new_mapping
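# Illustrative shape of the mapping returned above (hypothetical ids): special
# tokens keep their bare names, while every other entity is keyed by language,
# e.g. {"[MASK]": 0, "[PAD]": 1, "en:Japan": 12, "ja:日本": 12}.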
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
UpperCAmelCase_ : int = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 38 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
UpperCAmelCase_ : Any = '''Create a default config file for Accelerate with only a few flags set.'''
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Optional[int]="no" , __magic_name__ : str = default_json_config_file , __magic_name__ : bool = False ) -> str:
"""simple docstring"""
UpperCamelCase :Any = Path(__magic_name__ )
path.parent.mkdir(parents=__magic_name__ , exist_ok=__magic_name__ )
if path.exists():
print(
f"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
UpperCamelCase :Dict = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
f"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
UpperCamelCase :Optional[Any] = {
"""compute_environment""": """LOCAL_MACHINE""",
"""mixed_precision""": mixed_precision,
}
if torch.cuda.is_available():
UpperCamelCase :Union[str, Any] = torch.cuda.device_count()
UpperCamelCase :List[Any] = num_gpus
UpperCamelCase :Dict = False
if num_gpus > 1:
UpperCamelCase :Any = """MULTI_GPU"""
else:
UpperCamelCase :Any = """NO"""
elif is_xpu_available() and use_xpu:
UpperCamelCase :Optional[Any] = torch.xpu.device_count()
UpperCamelCase :Optional[int] = num_xpus
UpperCamelCase :int = False
if num_xpus > 1:
UpperCamelCase :Union[str, Any] = """MULTI_XPU"""
else:
UpperCamelCase :Union[str, Any] = """NO"""
elif is_npu_available():
UpperCamelCase :List[Any] = torch.npu.device_count()
UpperCamelCase :Optional[Any] = num_npus
UpperCamelCase :Tuple = False
if num_npus > 1:
UpperCamelCase :Optional[Any] = """MULTI_NPU"""
else:
UpperCamelCase :List[Any] = """NO"""
else:
UpperCamelCase :Any = 0
UpperCamelCase :Optional[Any] = True
UpperCamelCase :Optional[Any] = 1
UpperCamelCase :List[str] = """NO"""
UpperCamelCase :int = ClusterConfig(**__magic_name__ )
config.to_json_file(__magic_name__ )
return path
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Dict , __magic_name__ : Tuple ) -> List[str]:
"""simple docstring"""
UpperCamelCase :Dict = parser.add_parser("""default""" , parents=__magic_name__ , help=__magic_name__ , formatter_class=__magic_name__ )
parser.add_argument(
"""--config_file""" , default=__magic_name__ , help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) , dest="""save_location""" , )
parser.add_argument(
"""--mixed_precision""" , choices=["""no""", """fp16""", """bf16"""] , type=__magic_name__ , help="""Whether or not to use mixed precision training. """
"""Choose between FP16 and BF16 (bfloat16) training. """
"""BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.""" , default="""no""" , )
parser.set_defaults(func=__magic_name__ )
return parser
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : List[Any] ) -> List[str]:
"""simple docstring"""
config_file = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(f"""accelerate configuration saved at {config_file}""" )
| 38 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict=7 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : List[str]=1_8 , UpperCAmelCase__ : Optional[int]=3_0 , UpperCAmelCase__ : Optional[int]=4_0_0 , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Union[str, Any]=[0.48_145_466, 0.4_578_275, 0.40_821_073] , UpperCAmelCase__ : Union[str, Any]=[0.26_862_954, 0.26_130_258, 0.27_577_711] , UpperCAmelCase__ : Dict=True , ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = size if size is not None else {"height": 2_2_4, "width": 2_2_4}
__SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = min_resolution
__SCREAMING_SNAKE_CASE = max_resolution
__SCREAMING_SNAKE_CASE = do_resize
__SCREAMING_SNAKE_CASE = size
__SCREAMING_SNAKE_CASE = do_center_crop
__SCREAMING_SNAKE_CASE = crop_size
__SCREAMING_SNAKE_CASE = do_normalize
__SCREAMING_SNAKE_CASE = image_mean
__SCREAMING_SNAKE_CASE = image_std
__SCREAMING_SNAKE_CASE = do_convert_rgb
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Dict=False , UpperCAmelCase__ : Tuple=False , UpperCAmelCase__ : Any=False ) -> Any:
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
__SCREAMING_SNAKE_CASE = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uint8 ) )
else:
__SCREAMING_SNAKE_CASE = []
for i in range(self.batch_size ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uint8 ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
__SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs]
if torchify:
__SCREAMING_SNAKE_CASE = [torch.from_numpy(UpperCAmelCase__ ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class UpperCamelCase_ ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[int] = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self : Tuple ) -> str:
__SCREAMING_SNAKE_CASE = ChineseCLIPImageProcessingTester(self , do_center_crop=UpperCAmelCase__ )
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , "size" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , "do_center_crop" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , "center_crop" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , "image_mean" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , "image_std" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , "do_convert_rgb" ) )
def UpperCAmelCase_ ( self : List[str] ) -> Tuple:
__SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 2_2_4, "width": 2_2_4} )
self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} )
__SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
pass
def UpperCAmelCase_ ( self : List[str] ) -> List[Any]:
# Initialize image_processing
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(UpperCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
# Initialize image_processing
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(UpperCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCAmelCase_ ( self : Optional[int] ) -> str:
# Initialize image_processing
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(UpperCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
@require_torch
@require_vision
class UpperCamelCase_ ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self : int ) -> Any:
__SCREAMING_SNAKE_CASE = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = 3
@property
def UpperCAmelCase_ ( self : Any ) -> List[str]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self : Optional[int] ) -> Dict:
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , "size" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , "do_center_crop" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , "center_crop" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , "image_mean" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , "image_std" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , "do_convert_rgb" ) )
def UpperCAmelCase_ ( self : Optional[Any] ) -> str:
pass
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]:
# Initialize image_processing
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(UpperCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 195 |
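For reference, a minimal sketch of the resize, center-crop, rescale and normalize steps that the shape assertions above exercise, written with plain PIL/NumPy rather than the library's implementation; the 224 size and 0.5 statistics are illustrative assumptions, not the checkpoint's actual configuration, and an RGB input is assumed.
import numpy as np
from PIL import Image

def preprocess(image, shortest_edge=224, crop=224, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)):
    # resize so the shorter side equals `shortest_edge`, preserving aspect ratio
    w, h = image.size
    scale = shortest_edge / min(w, h)
    image = image.resize((round(w * scale), round(h * scale)), Image.BICUBIC)
    # center-crop to (crop, crop)
    w, h = image.size
    left, top = (w - crop) // 2, (h - crop) // 2
    image = image.crop((left, top, left + crop, top + crop))
    # HWC uint8 -> float in [0, 1], normalize per channel, then channels-first
    arr = np.asarray(image, dtype=np.float32) / 255.0
    arr = (arr - np.array(mean, dtype=np.float32)) / np.array(std, dtype=np.float32)
    return arr.transpose(2, 0, 1)  # shape (num_channels, crop, crop), as the tests assert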
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
a__ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase__ : WhisperForConditionalGeneration , UpperCAmelCase__ : WhisperProcessor , UpperCAmelCase__ : AutoencoderKL , UpperCAmelCase__ : CLIPTextModel , UpperCAmelCase__ : CLIPTokenizer , UpperCAmelCase__ : UNetaDConditionModel , UpperCAmelCase__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCAmelCase__ : StableDiffusionSafetyChecker , UpperCAmelCase__ : CLIPImageProcessor , ) -> Optional[int]:
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
speech_model=UpperCAmelCase__ , speech_processor=UpperCAmelCase__ , vae=UpperCAmelCase__ , text_encoder=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ , )
def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Optional[Union[str, int]] = "auto" ) -> str:
if slice_size == "auto":
__SCREAMING_SNAKE_CASE = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ) -> List[Any]:
self.enable_attention_slicing(UpperCAmelCase__ )
@torch.no_grad()
def __call__( self : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str=1_6_0_0_0 , UpperCAmelCase__ : int = 5_1_2 , UpperCAmelCase__ : int = 5_1_2 , UpperCAmelCase__ : int = 5_0 , UpperCAmelCase__ : float = 7.5 , UpperCAmelCase__ : Optional[Union[str, List[str]]] = None , UpperCAmelCase__ : Optional[int] = 1 , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : Optional[torch.Generator] = None , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase__ : int = 1 , **UpperCAmelCase__ : Dict , ) -> Any:
__SCREAMING_SNAKE_CASE = self.speech_processor.feature_extractor(
UpperCAmelCase__ , return_tensors="pt" , sampling_rate=UpperCAmelCase__ ).input_features.to(self.device )
__SCREAMING_SNAKE_CASE = self.speech_model.generate(UpperCAmelCase__ , max_length=4_8_0_0_0_0 )
__SCREAMING_SNAKE_CASE = self.speech_processor.tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ , normalize=UpperCAmelCase__ )[
0
]
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = 1
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(UpperCAmelCase__ )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(UpperCAmelCase__ )}.""" )
# get prompt text embeddings
__SCREAMING_SNAKE_CASE = self.tokenizer(
UpperCAmelCase__ , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
__SCREAMING_SNAKE_CASE = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
__SCREAMING_SNAKE_CASE = text_input_ids[:, : self.tokenizer.model_max_length]
__SCREAMING_SNAKE_CASE = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = text_embeddings.shape
__SCREAMING_SNAKE_CASE = text_embeddings.repeat(1 , UpperCAmelCase__ , 1 )
__SCREAMING_SNAKE_CASE = text_embeddings.view(bs_embed * num_images_per_prompt , UpperCAmelCase__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__SCREAMING_SNAKE_CASE = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__SCREAMING_SNAKE_CASE = 42
if negative_prompt is None:
__SCREAMING_SNAKE_CASE = [""] * batch_size
elif type(UpperCAmelCase__ ) is not type(UpperCAmelCase__ ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(UpperCAmelCase__ )} !="""
F""" {type(UpperCAmelCase__ )}.""" )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = [negative_prompt]
elif batch_size != len(UpperCAmelCase__ ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(UpperCAmelCase__ )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
" the batch size of `prompt`." )
else:
__SCREAMING_SNAKE_CASE = negative_prompt
__SCREAMING_SNAKE_CASE = text_input_ids.shape[-1]
__SCREAMING_SNAKE_CASE = self.tokenizer(
UpperCAmelCase__ , padding="max_length" , max_length=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors="pt" , )
__SCREAMING_SNAKE_CASE = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__SCREAMING_SNAKE_CASE = uncond_embeddings.shape[1]
__SCREAMING_SNAKE_CASE = uncond_embeddings.repeat(1 , UpperCAmelCase__ , 1 )
__SCREAMING_SNAKE_CASE = uncond_embeddings.view(batch_size * num_images_per_prompt , UpperCAmelCase__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__SCREAMING_SNAKE_CASE = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__SCREAMING_SNAKE_CASE = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__SCREAMING_SNAKE_CASE = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__SCREAMING_SNAKE_CASE = torch.randn(UpperCAmelCase__ , generator=UpperCAmelCase__ , device="cpu" , dtype=UpperCAmelCase__ ).to(
self.device )
else:
__SCREAMING_SNAKE_CASE = torch.randn(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=self.device , dtype=UpperCAmelCase__ )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__SCREAMING_SNAKE_CASE = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(UpperCAmelCase__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__SCREAMING_SNAKE_CASE = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__SCREAMING_SNAKE_CASE = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__SCREAMING_SNAKE_CASE = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__SCREAMING_SNAKE_CASE = {}
if accepts_eta:
__SCREAMING_SNAKE_CASE = eta
for i, t in enumerate(self.progress_bar(UpperCAmelCase__ ) ):
# expand the latents if we are doing classifier free guidance
__SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__SCREAMING_SNAKE_CASE = self.scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
# predict the noise residual
__SCREAMING_SNAKE_CASE = self.unet(UpperCAmelCase__ , UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ ).sample
# perform guidance
if do_classifier_free_guidance:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = noise_pred.chunk(2 )
__SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__SCREAMING_SNAKE_CASE = self.scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = 1 / 0.18_215 * latents
__SCREAMING_SNAKE_CASE = self.vae.decode(UpperCAmelCase__ ).sample
__SCREAMING_SNAKE_CASE = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__SCREAMING_SNAKE_CASE = self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=UpperCAmelCase__ , nsfw_content_detected=UpperCAmelCase__ )
| 195 | 1 |
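The guidance step in the denoising loop above is easiest to see in isolation. A hedged sketch with toy shapes: the batch is doubled (unconditional plus conditional), the UNet output is split, and the two predictions are blended with `guidance_scale`.
import torch

noise_pred = torch.randn(2, 4, 64, 64)                    # stand-in for unet(...).sample on the doubled batch
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)  # split back into the two halves
guidance_scale = 7.5
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert guided.shape == (1, 4, 64, 64)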
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _lowerCamelCase( unittest.TestCase ):
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Optional[Any] = 'laion/clap-htsat-unfused'
_lowercase : Optional[Any] = tempfile.mkdtemp()
def UpperCamelCase ( self, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
return RobertaTokenizer.from_pretrained(self.checkpoint, **lowerCamelCase)
def UpperCamelCase ( self, **lowerCamelCase) -> str:
"""simple docstring"""
return ClapFeatureExtractor.from_pretrained(self.checkpoint, **lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : str = self.get_tokenizer()
_lowercase : Optional[int] = self.get_feature_extractor()
_lowercase : int = ClapProcessor(tokenizer=lowerCamelCase, feature_extractor=lowerCamelCase)
processor.save_pretrained(self.tmpdirname)
_lowercase : Union[str, Any] = ClapProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
self.assertIsInstance(processor.tokenizer, lowerCamelCase)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
self.assertIsInstance(processor.feature_extractor, lowerCamelCase)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[int] = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
processor.save_pretrained(self.tmpdirname)
_lowercase : str = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
_lowercase : List[str] = self.get_feature_extractor(do_normalize=lowerCamelCase, padding_value=1.0)
_lowercase : Tuple = ClapProcessor.from_pretrained(
self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=lowerCamelCase, padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, lowerCamelCase)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
self.assertIsInstance(processor.feature_extractor, lowerCamelCase)
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Union[str, Any] = self.get_feature_extractor()
_lowercase : List[Any] = self.get_tokenizer()
_lowercase : Dict = ClapProcessor(tokenizer=lowerCamelCase, feature_extractor=lowerCamelCase)
_lowercase : Union[str, Any] = floats_list((3, 10_00))
_lowercase : str = feature_extractor(lowerCamelCase, return_tensors='np')
_lowercase : Tuple = processor(audios=lowerCamelCase, return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Optional[int] = self.get_feature_extractor()
_lowercase : Any = self.get_tokenizer()
_lowercase : Dict = ClapProcessor(tokenizer=lowerCamelCase, feature_extractor=lowerCamelCase)
_lowercase : Any = 'This is a test string'
_lowercase : List[Any] = processor(text=lowerCamelCase)
_lowercase : Dict = tokenizer(lowerCamelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Tuple = self.get_feature_extractor()
_lowercase : str = self.get_tokenizer()
_lowercase : List[str] = ClapProcessor(tokenizer=lowerCamelCase, feature_extractor=lowerCamelCase)
_lowercase : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowercase : str = processor.batch_decode(lowerCamelCase)
_lowercase : Any = tokenizer.batch_decode(lowerCamelCase)
self.assertListEqual(lowerCamelCase, lowerCamelCase)
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Dict = self.get_feature_extractor()
_lowercase : Any = self.get_tokenizer()
_lowercase : Tuple = ClapProcessor(tokenizer=lowerCamelCase, feature_extractor=lowerCamelCase)
self.assertListEqual(
processor.model_input_names[2:], feature_extractor.model_input_names, msg='`processor` and `feature_extractor` model input names do not match', )
| 21 |
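The tests above all rely on the processor delegating to its two components. A toy sketch of that delegation pattern, assuming nothing beyond callables for the tokenizer and feature extractor; this mirrors the behaviour under test, not the transformers implementation.
class ToyProcessor:
    def __init__(self, tokenizer, feature_extractor):
        self.tokenizer = tokenizer
        self.feature_extractor = feature_extractor

    def __call__(self, text=None, audios=None, **kwargs):
        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios.")
        out = {}
        if text is not None:
            out.update(self.tokenizer(text, **kwargs))        # text goes to the tokenizer
        if audios is not None:
            out.update(self.feature_extractor(audios, **kwargs))  # audio goes to the feature extractor
        return out

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)   # pure passthrough, as the test checks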
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Union[str, Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE : Union[str, Any] = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE : Dict = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE : str = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE : Dict = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
SCREAMING_SNAKE_CASE : Optional[Any] = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
SCREAMING_SNAKE_CASE : Dict = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
SCREAMING_SNAKE_CASE : List[Any] = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
SCREAMING_SNAKE_CASE : Dict = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
SCREAMING_SNAKE_CASE : Dict = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class _lowerCamelCase( _a ):
lowercase_ : Any = VOCAB_FILES_NAMES
lowercase_ : Optional[int] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase_ : str = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : str = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class _lowerCamelCase( _a ):
lowercase_ : Optional[int] = VOCAB_FILES_NAMES
lowercase_ : Any = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase_ : str = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : Union[str, Any] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE : Optional[int] = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
SCREAMING_SNAKE_CASE : Any = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
SCREAMING_SNAKE_CASE : str = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(_a )
class _lowerCamelCase:
def __call__( self, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = False, lowerCamelCase = False, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, **lowerCamelCase, ) -> BatchEncoding:
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=lowerCamelCase, return_tensors=lowerCamelCase, return_attention_mask=lowerCamelCase, **lowerCamelCase, )
elif titles is None or texts is None:
_lowercase : Dict = titles if texts is None else texts
return super().__call__(
lowerCamelCase, lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=lowerCamelCase, return_tensors=lowerCamelCase, return_attention_mask=lowerCamelCase, **lowerCamelCase, )
_lowercase : Union[str, Any] = titles if not isinstance(lowerCamelCase, lowerCamelCase) else [titles]
_lowercase : Tuple = texts if not isinstance(lowerCamelCase, lowerCamelCase) else [texts]
_lowercase : Optional[Any] = len(lowerCamelCase)
_lowercase : Any = questions if not isinstance(lowerCamelCase, lowerCamelCase) else [questions] * n_passages
if len(lowerCamelCase) != len(lowerCamelCase):
raise ValueError(
F'''There should be as many titles than texts but got {len(lowerCamelCase)} titles and {len(lowerCamelCase)} texts.''')
_lowercase : Any = super().__call__(lowerCamelCase, lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase)['input_ids']
_lowercase : Tuple = super().__call__(lowerCamelCase, add_special_tokens=lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase)['input_ids']
_lowercase : int = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCamelCase, lowerCamelCase)
]
}
if return_attention_mask is not False:
_lowercase : Optional[Any] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
_lowercase : Union[str, Any] = attention_mask
return self.pad(lowerCamelCase, padding=lowerCamelCase, max_length=lowerCamelCase, return_tensors=lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase = 16, lowerCamelCase = 64, lowerCamelCase = 4, ) -> List[DPRSpanPrediction]:
"""simple docstring"""
_lowercase : Union[str, Any] = reader_input['input_ids']
_lowercase , _lowercase , _lowercase : Tuple = reader_output[:3]
_lowercase : Tuple = len(lowerCamelCase)
_lowercase : str = sorted(range(lowerCamelCase), reverse=lowerCamelCase, key=relevance_logits.__getitem__)
_lowercase : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_lowercase : str = list(input_ids[doc_id])
# assuming question & title information is at the beginning of the sequence
_lowercase : Any = sequence_ids.index(self.sep_token_id, 2) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_lowercase : List[Any] = sequence_ids.index(self.pad_token_id)
else:
_lowercase : List[str] = len(lowerCamelCase)
_lowercase : Tuple = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=lowerCamelCase, top_spans=lowerCamelCase, )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=lowerCamelCase, start_index=lowerCamelCase, end_index=lowerCamelCase, text=self.decode(sequence_ids[start_index : end_index + 1]), ))
if len(lowerCamelCase) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> List[DPRSpanPrediction]:
"""simple docstring"""
_lowercase : str = []
for start_index, start_score in enumerate(lowerCamelCase):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
scores.append(((start_index, start_index + answer_length), start_score + end_score))
_lowercase : Dict = sorted(lowerCamelCase, key=lambda lowerCamelCase: x[1], reverse=lowerCamelCase)
_lowercase : List[str] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''')
_lowercase : Dict = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'''Span is too long: {length} > {max_answer_length}''')
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals):
continue
chosen_span_intervals.append((start_index, end_index))
if len(lowerCamelCase) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_a )
class _lowerCamelCase( _a, _a ):
lowercase_ : Union[str, Any] = VOCAB_FILES_NAMES
lowercase_ : Any = READER_PRETRAINED_VOCAB_FILES_MAP
lowercase_ : Dict = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : Optional[int] = READER_PRETRAINED_INIT_CONFIGURATION
lowercase_ : str = ["""input_ids""", """attention_mask"""]
| 21 | 1 |
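The span-selection helper at the end of the reader tokenizer is the densest part of the snippet above. A readable restatement of the same algorithm, under the assumption that the logits are plain Python lists: score every (start, end) pair no longer than `max_answer_length`, sort by score, and drop spans nested inside (or containing) a span already kept.
def best_spans(start_logits, end_logits, max_answer_length, top_spans):
    # score every candidate (start, end) pair no longer than max_answer_length
    scored = []
    for s, s_score in enumerate(start_logits):
        for length, e_score in enumerate(end_logits[s : s + max_answer_length]):
            scored.append(((s, s + length), s_score + e_score))
    scored.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (s, e), _ in scored:
        # skip spans nested inside (or containing) a span already chosen
        if any(s <= ps <= pe <= e or ps <= s <= e <= pe for ps, pe in chosen):
            continue
        chosen.append((s, e))
        if len(chosen) == top_spans:
            break
    return chosen

print(best_spans([0.1, 2.0, 0.3], [0.2, 1.5, 0.1], max_answer_length=2, top_spans=2))  # [(1, 1), (2, 2)]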
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
__A : List[str] = [
"kernels/rwkv/wkv_cuda.cu",
"kernels/rwkv/wkv_op.cpp",
"kernels/deformable_detr/ms_deform_attn.h",
"kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
"models/graphormer/algos_graphormer.pyx",
]
def UpperCamelCase_ ( A__ : Any ):
'''simple docstring'''
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
__A : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
__A : List[Any] = parser.parse_args()
if args.check_lib:
__A : List[str] = importlib.import_module("transformers")
__A : Any = Path(transformers_module.__file__).parent
else:
__A : Dict = Path.cwd() / "build/lib/transformers"
if not test_custom_files_are_present(transformers_path):
raise ValueError("The built release does not contain the custom files. Fix this before going further!")
| 364 |
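The check script above boils down to verifying that non-Python assets survived the build. A hedged generic version of the same pattern; the package name and asset paths here are made up for illustration.
from pathlib import Path

def missing_assets(root, assets):
    # return every listed asset that did not survive the build
    return [a for a in assets if not (Path(root) / a).exists()]

missing = missing_assets("build/lib/mypkg", ["kernels/op.cu", "algos.pyx"])
print("missing custom files:", missing)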
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __snake_case ( unittest.TestCase):
"""simple docstring"""
@property
def __lowercase ( self : str ) -> List[Any]:
torch.manual_seed(0 )
lowerCAmelCase_ : Optional[Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def __lowercase ( self : Tuple ) -> Optional[Any]:
lowerCAmelCase_ : Optional[int] = self.dummy_uncond_unet
lowerCAmelCase_ : Tuple = PNDMScheduler()
lowerCAmelCase_ : List[Any] = PNDMPipeline(unet=lowerCamelCase , scheduler=lowerCamelCase )
pndm.to(lowerCamelCase )
pndm.set_progress_bar_config(disable=lowerCamelCase )
lowerCAmelCase_ : Dict = torch.manual_seed(0 )
lowerCAmelCase_ : List[Any] = pndm(generator=lowerCamelCase , num_inference_steps=20 , output_type="""numpy""" ).images
lowerCAmelCase_ : str = torch.manual_seed(0 )
lowerCAmelCase_ : int = pndm(generator=lowerCamelCase , num_inference_steps=20 , output_type="""numpy""" , return_dict=lowerCamelCase )[0]
lowerCAmelCase_ : Optional[int] = image[0, -3:, -3:, -1]
lowerCAmelCase_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase_ : int = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class __snake_case ( unittest.TestCase):
"""simple docstring"""
def __lowercase ( self : str ) -> Tuple:
lowerCAmelCase_ : str = """google/ddpm-cifar10-32"""
lowerCAmelCase_ : Dict = UNetaDModel.from_pretrained(lowerCamelCase )
lowerCAmelCase_ : Dict = PNDMScheduler()
lowerCAmelCase_ : Union[str, Any] = PNDMPipeline(unet=lowerCamelCase , scheduler=lowerCamelCase )
pndm.to(lowerCamelCase )
pndm.set_progress_bar_config(disable=lowerCamelCase )
lowerCAmelCase_ : Any = torch.manual_seed(0 )
lowerCAmelCase_ : Union[str, Any] = pndm(generator=lowerCamelCase , output_type="""numpy""" ).images
lowerCAmelCase_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase_ : List[Any] = np.array([0.1_564, 0.14_645, 0.1_406, 0.14_715, 0.12_425, 0.14_045, 0.13_115, 0.12_175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 89 | 0 |
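The numerical assertions above follow a common regression-test pattern: compare a small corner slice of the generated image against stored reference values within a tolerance. A self-contained sketch with placeholder data:
import numpy as np

image = np.zeros((1, 32, 32, 3), dtype=np.float32)   # stand-in for pipeline(...).images
image_slice = image[0, -3:, -3:, -1]                  # 3x3 corner of the last channel
expected_slice = np.zeros(9, dtype=np.float32)        # illustrative stored reference values
assert image.shape == (1, 32, 32, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2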
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ : str = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Any = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 286 |
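The `_LazyModule` indirection above defers the heavy imports until a name is first touched. A simplified stand-in for that mechanism, assuming only the standard library; it is not transformers' actual implementation.
import importlib
import types

class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported symbol to the submodule that defines it
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }

    def __getattr__(self, attr):
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._symbol_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so the import happens only once per symbol
        return value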
"""simple docstring"""
import torch
from diffusers import DiffusionPipeline
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=snake_case_ , scheduler=snake_case_ )
def __call__( self ):
"""simple docstring"""
A_ : Optional[Any] = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
A_ : List[str] = 1
A_ : List[str] = self.unet(snake_case_ , snake_case_ ).sample
A_ : Optional[int] = self.scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
A_ : List[Any] = scheduler_output - scheduler_output + torch.ones_like(snake_case_ )
return result
| 286 | 1 |
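The pipeline above runs a single scheduler step; in practice the same two calls sit inside a timestep loop. A hedged sketch of that canonical loop, assuming the usual diffusers `unet`/`scheduler` calling convention:
import torch

@torch.no_grad()
def denoise(unet, scheduler, sample, num_inference_steps=50):
    scheduler.set_timesteps(num_inference_steps)
    for t in scheduler.timesteps:
        noise_pred = unet(sample, t).sample                         # predict the noise residual
        sample = scheduler.step(noise_pred, t, sample).prev_sample  # step x_t -> x_{t-1}
    return sample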
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__UpperCamelCase : List[Any] = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[str] = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__UpperCamelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 351 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : List[str] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = '▁'
__UpperCamelCase : str = {'vocab_file': 'sentencepiece.bpe.model'}
__UpperCamelCase : Tuple = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
}
}
__UpperCamelCase : List[str] = {
'facebook/mbart-large-en-ro': 1024,
'facebook/mbart-large-cc25': 1024,
}
# fmt: off
__UpperCamelCase : Dict = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = ["""input_ids""", """attention_mask"""]
UpperCamelCase_ = []
UpperCamelCase_ = []
def __init__( self : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int]="<s>" , UpperCamelCase__ : Union[str, Any]="</s>" , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : List[Any]="<s>" , UpperCamelCase__ : List[str]="<unk>" , UpperCamelCase__ : Tuple="<pad>" , UpperCamelCase__ : str="<mask>" , UpperCamelCase__ : Any=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : str=None , UpperCamelCase__ : Optional[Dict[str, Any]] = None , UpperCamelCase__ : List[Any]=None , **UpperCamelCase__ : Dict , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
SCREAMING_SNAKE_CASE : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , src_lang=UpperCamelCase__ , tgt_lang=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
SCREAMING_SNAKE_CASE : str = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE : Tuple = 1
SCREAMING_SNAKE_CASE : Dict = len(self.sp_model )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(UpperCamelCase__ )
}
SCREAMING_SNAKE_CASE : Any = {v: k for k, v in self.lang_code_to_id.items()}
SCREAMING_SNAKE_CASE : Optional[Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
SCREAMING_SNAKE_CASE : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
SCREAMING_SNAKE_CASE : int = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
SCREAMING_SNAKE_CASE : List[str] = src_lang if src_lang is not None else '''en_XX'''
SCREAMING_SNAKE_CASE : Any = self.lang_code_to_id[self._src_lang]
SCREAMING_SNAKE_CASE : int = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.__dict__.copy()
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : Any = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Tuple , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
SCREAMING_SNAKE_CASE : Tuple = {}
SCREAMING_SNAKE_CASE : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def __A ( self : int ):
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def __A ( self : Tuple , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = [1] * len(self.prefix_tokens )
SCREAMING_SNAKE_CASE : Dict = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(UpperCamelCase__ )) + suffix_ones
return prefix_ones + ([0] * len(UpperCamelCase__ )) + ([0] * len(UpperCamelCase__ )) + suffix_ones
def __A ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __A ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __A ( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[str] , **UpperCamelCase__ : Tuple ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
SCREAMING_SNAKE_CASE : Dict = src_lang
SCREAMING_SNAKE_CASE : Union[str, Any] = self(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = self.convert_tokens_to_ids(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = tgt_lang_id
return inputs
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __A ( self : Any , UpperCamelCase__ : str ):
'''simple docstring'''
return self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
def __A ( self : str , UpperCamelCase__ : int ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE : Optional[Any] = self.sp_model.PieceToId(UpperCamelCase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __A ( self : Dict , UpperCamelCase__ : str ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __A ( self : int , UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ''''''.join(UpperCamelCase__ ).replace(UpperCamelCase__ , ''' ''' ).strip()
return out_string
def __A ( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , '''wb''' ) as fi:
SCREAMING_SNAKE_CASE : List[str] = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
def __A ( self : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : str = "en_XX" , UpperCamelCase__ : Optional[List[str]] = None , UpperCamelCase__ : str = "ro_RO" , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = src_lang
SCREAMING_SNAKE_CASE : List[str] = tgt_lang
return super().prepare_seqaseq_batch(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : List[Any] ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def __A ( self : List[str] ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __A ( self : str , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.lang_code_to_id[src_lang]
SCREAMING_SNAKE_CASE : Tuple = []
SCREAMING_SNAKE_CASE : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
def __A ( self : List[Any] , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.lang_code_to_id[lang]
SCREAMING_SNAKE_CASE : Optional[int] = []
SCREAMING_SNAKE_CASE : List[str] = [self.eos_token_id, self.cur_lang_code]
| 258 | 0 |
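The fairseq/SentencePiece alignment described in the tokenizer's comments above is worth a worked example: the first four fairseq ids are pinned, and every SentencePiece id is shifted by `fairseq_offset = 1`. A small sketch, where the lambda stands in for `sp_model.PieceToId`:
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1

def token_to_id(token, sp_piece_to_id):
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = sp_piece_to_id(token)        # 0 means "unknown" in SentencePiece
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]

# e.g. the spm piece "," has id 3, so it maps to fairseq id 4, matching the table above
assert token_to_id(",", lambda t: 3) == 4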
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a__ = logging.get_logger(__name__)
a__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
a__ = {
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
a__ = {
'''gpt-neox-20b''': 2048,
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = VOCAB_FILES_NAMES
UpperCAmelCase__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Tuple = ["input_ids", "attention_mask"]
def __init__( self , _a=None , _a=None , _a=None , _a="<|endoftext|>" , _a="<|endoftext|>" , _a="<|endoftext|>" , _a=False , **_a , ) -> Union[str, Any]:
super().__init__(
_a , _a , tokenizer_file=_a , unk_token=_a , bos_token=_a , eos_token=_a , add_prefix_space=_a , **_a , )
_a : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _a ) != add_prefix_space:
_a : Dict = getattr(_a , pre_tok_state.pop('''type''' ) )
_a : Tuple = add_prefix_space
_a : Optional[Any] = pre_tok_class(**_a )
_a : Optional[int] = add_prefix_space
def __lowercase ( self , _a , _a = None ) -> Tuple[str]:
_a : Union[str, Any] = self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
def __lowercase ( self , _a ) -> List[int]:
_a : Any = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_a , add_special_tokens=_a ) + [self.eos_token_id] )
if len(_a ) > self.model_max_length:
_a : Union[str, Any] = input_ids[-self.model_max_length :]
return input_ids
| 235 |
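The conversation helper at the end of the tokenizer above encodes each turn, appends an EOS id, and keeps only the most recent `model_max_length` tokens. A self-contained restatement with toy values:
def build_conversation_input_ids(turns, encode, eos_token_id, model_max_length):
    input_ids = []
    for text in turns:
        input_ids.extend(encode(text) + [eos_token_id])  # every turn ends with EOS
    return input_ids[-model_max_length:]                 # keep only the latest history

ids = build_conversation_input_ids(["hi", "hello"], lambda t: [1, 2], eos_token_id=0, model_max_length=4)
assert ids == [0, 1, 2, 0]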
a__ = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImg2ImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImg2ImgPipeline,
IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImg2ImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyV22ControlnetImg2ImgPipeline,
KandinskyV22ControlnetPipeline,
KandinskyV22Img2ImgPipeline,
KandinskyV22InpaintPipeline,
KandinskyV22Pipeline,
KandinskyV22PriorEmb2EmbPipeline,
KandinskyV22PriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImg2ImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImg2ImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepth2ImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImg2ImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPix2PixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDM3DPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPix2PixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImg2ImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
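# Editor's note: a minimal, hypothetical sketch of the optional-dependency guard used
# throughout the package __init__ above. The `fancy_extra` package name is invented for
# illustration; only the try/raise/except/else shape mirrors the source.
import importlib.util


class OptionalDependencyNotAvailableSketch(BaseException):
    """Raised when an optional backend is missing."""


def is_fancy_extra_available() -> bool:
    # find_spec returns None when the package cannot be located
    return importlib.util.find_spec("fancy_extra") is not None


try:
    if not is_fancy_extra_available():
        raise OptionalDependencyNotAvailableSketch()
except OptionalDependencyNotAvailableSketch:
    FancyPipeline = None  # a dummy placeholder would normally be bound here
else:
    from fancy_extra import FancyPipeline  # real import happens only when available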
| 235 | 1 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"""iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""",
"""iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""",
"""iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""",
"""mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""",
"""mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""",
"""mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""",
"""mask_downscaling.0""": """mask_embed.conv1""",
"""mask_downscaling.1""": """mask_embed.layer_norm1""",
"""mask_downscaling.3""": """mask_embed.conv2""",
"""mask_downscaling.4""": """mask_embed.layer_norm2""",
"""mask_downscaling.6""": """mask_embed.conv3""",
"""point_embeddings""": """point_embed""",
"""pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""",
"""image_encoder""": """vision_encoder""",
"""neck.0""": """neck.conv1""",
"""neck.1""": """neck.layer_norm1""",
"""neck.2""": """neck.conv2""",
"""neck.3""": """neck.layer_norm2""",
"""patch_embed.proj""": """patch_embed.projection""",
""".norm""": """.layer_norm""",
"""blocks""": """layers""",
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)
    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        model_state_dict[key] = value
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")
    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(
            vision_config=vision_config,
        )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(
            vision_config=vision_config,
        )

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)
    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
__UpperCAmelCase = ["""sam_vit_b_01ec64""", """sam_vit_h_4b8939""", """sam_vit_l_0b3195"""]
parser.add_argument(
"""--model_name""",
default="""sam_vit_h_4b8939""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
parser.add_argument(
"""--model_hub_id""",
default="""ybelkada/segment-anything""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
__UpperCAmelCase = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
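# Editor's note: a self-contained sketch of the substring-based key remapping that
# replace_keys() performs above. The toy mapping and state dict are invented here;
# the real script walks KEYS_TO_MODIFY_MAPPING the same way.
TOY_MAPPING = {"image_encoder": "vision_encoder", "neck.0": "neck.conv1"}


def remap_keys_sketch(state_dict: dict) -> dict:
    remapped = {}
    for key, value in state_dict.items():
        for old, new in TOY_MAPPING.items():
            if old in key:
                key = key.replace(old, new)
        remapped[key] = value
    return remapped


assert remap_keys_sketch({"image_encoder.neck.0.weight": 1}) == {"vision_encoder.neck.conv1.weight": 1}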
| 358 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
    import jax

    from transformers.models.big_bird.modeling_flax_big_bird import (
        FlaxBigBirdForCausalLM,
        FlaxBigBirdForMaskedLM,
        FlaxBigBirdForMultipleChoice,
        FlaxBigBirdForPreTraining,
        FlaxBigBirdForQuestionAnswering,
        FlaxBigBirdForSequenceClassification,
        FlaxBigBirdForTokenClassification,
        FlaxBigBirdModel,
    )
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # flax block-sparse attention differs slightly from the PyTorch version, so skip the attentions check
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
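# Editor's note: a minimal sketch of the JIT-enabled vs JIT-disabled comparison idiom
# used by test_jit_compilation above, applied to a toy function instead of a model.
# `toy_forward` is invented for illustration; jax must be installed for this to run.
import jax
import jax.numpy as jnp


@jax.jit
def toy_forward(x):
    return jnp.tanh(x) * 2.0


x = jnp.arange(4.0)
jitted = toy_forward(x)  # compiled execution
with jax.disable_jit():
    eager = toy_forward(x)  # interpreted execution, easier to debug
assert jitted.shape == eager.shape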
| 139 | 0 |
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        MPNetForMaskedLM,
        MPNetForMultipleChoice,
        MPNetForQuestionAnswering,
        MPNetForSequenceClassification,
        MPNetForTokenClassification,
        MPNetModel,
    )
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mpnet_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
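# Editor's note: a framework-level sketch of the "expected slice" pattern used in the
# integration test above - compare a small, stable corner of a large output tensor
# against recorded reference values instead of the whole tensor. All values here are
# invented for illustration.
import torch

output = torch.full((1, 11, 768), 0.05)        # stand-in for model(input_ids)[0]
expected_slice = torch.full((1, 3, 3), 0.05)   # reference corner, recorded once
assert torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)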
| 78 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
| 189 | 0 |
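# Editor's note: a hedged sketch of the entailment-based zero-shot idea the tool above
# implements - score "text / This example is <label>" pairs with an NLI model and keep
# the label whose entailment logit (index 2 for this checkpoint) is highest. The
# checkpoint name comes from the source; the surrounding driver code is a sketch.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-mnli")
model = AutoModelForSequenceClassification.from_pretrained("facebook/bart-large-mnli")

text, labels = "I loved this movie", ["positive", "negative"]
inputs = tokenizer(
    [text] * len(labels),
    [f"This example is {label}" for label in labels],
    return_tensors="pt",
    padding=True,
)
with torch.no_grad():
    logits = model(**inputs).logits
print(labels[torch.argmax(logits[:, 2]).item()])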
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowercase_ = mocked_dataloaders # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
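# Editor's note: what `accelerator.accumulate(model)` above does for you, written out
# for a plain PyTorch loop. The toy model and data are invented; the point is scaling
# the loss and stepping only every `grad_accum_steps` micro-batches.
import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
grad_accum_steps = 4

for step in range(16):
    x, y = torch.randn(8, 4), torch.randn(8, 1)
    loss = torch.nn.functional.mse_loss(model(x), y)
    (loss / grad_accum_steps).backward()  # scale so accumulated grads average out
    if (step + 1) % grad_accum_steps == 0:
        optimizer.step()
        optimizer.zero_grad()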
| 354 |
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_with_dp_array(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0

        next_row = current_row[:]  # snapshot this row before it is overwritten
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
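# Editor's note: a quick agreement check across the four implementations above,
# on a small matrix invented for illustration (largest all-ones square has side 2).
mat = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
assert (
    largest_square_area_in_matrix_top_down(3, 3, mat)
    == largest_square_area_in_matrix_top_down_with_dp_array(3, 3, mat)
    == largest_square_area_in_matrix_bottom_up(3, 3, mat)
    == largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, mat)
    == 2
)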
| 282 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
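# Editor's note: a short usage sketch of the preprocessing contract the tests above
# verify - a single PIL image comes out as a (1, channels, crop_h, crop_w) tensor.
# The size/crop_size values are the tester defaults; the random image is invented.
import numpy as np
from PIL import Image
from transformers import ChineseCLIPImageProcessor

processor = ChineseCLIPImageProcessor(
    size={"height": 224, "width": 224}, crop_size={"height": 18, "width": 18}
)
image = Image.fromarray(np.random.randint(0, 255, (30, 30, 3), dtype=np.uint8))
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # expected: torch.Size([1, 3, 18, 18])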
| 93 |
def stooge_sort(arr: list) -> list:
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return

    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3

        # Recursively sort first 2/3 elements
        stooge(arr, i, h - t)

        # Recursively sort last 2/3 elements
        stooge(arr, i + t, h)

        # Recursively sort first 2/3 elements
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
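# Editor's note: a quick sanity check for the implementation above. Stooge sort is a
# teaching curiosity: its running time is O(n^(log 3 / log 1.5)) ~ O(n^2.71), far
# slower than mainstream O(n log n) sorts.
assert stooge_sort([2, 4, 5, 3, 1]) == [1, 2, 3, 4, 5]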
| 99 | 0 |
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class __lowerCamelCase ( __UpperCamelCase ):
__UpperCamelCase = """owlvit"""
__UpperCamelCase = True
def __init__(self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=512 , lowerCamelCase=2.6592 , lowerCamelCase=True , **lowerCamelCase , ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
if text_config is None:
_lowerCAmelCase = {}
logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" )
if vision_config is None:
_lowerCAmelCase = {}
logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" )
_lowerCAmelCase = OwlViTTextConfig(**lowerCamelCase )
_lowerCAmelCase = OwlViTVisionConfig(**lowerCamelCase )
_lowerCAmelCase = projection_dim
_lowerCAmelCase = logit_scale_init_value
_lowerCAmelCase = return_dict
_lowerCAmelCase = 1.0
@classmethod
def A__ (cls , lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
cls._set_token_in_kwargs(lowerCamelCase )
_lowerCAmelCase = cls.get_config_dict(lowerCamelCase , **lowerCamelCase )
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCamelCase , **lowerCamelCase )
@classmethod
    def A__ (cls , text_config , vision_config , **kwargs ):
        '''simple docstring'''
        config_dict = {}
        config_dict["""text_config"""] = text_config
        config_dict["""vision_config"""] = vision_config
        return cls.from_dict(config_dict , **kwargs )
    def A__ (self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["""text_config"""] = self.text_config.to_dict()
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
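# Usage sketch (hedged: illustrative only — in this dump the composite class and
# its classmethods keep obfuscated names; upstream this is
# OwlViTConfig.from_text_vision_configs). Composing a config from two
# sub-config dicts and round-tripping it through to_dict():
#
# config = OwlViTConfig.from_text_vision_configs(
#     OwlViTTextConfig().to_dict(), OwlViTVisionConfig().to_dict())
# assert config.to_dict()["model_type"] == "owlvit"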
class __lowerCamelCase ( OnnxConfig ):
    @property
    def inputs(self ):
        '''simple docstring'''
        return OrderedDict(
            [
                ("""input_ids""", {0: """batch""", 1: """sequence"""}),
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ("""attention_mask""", {0: """batch""", 1: """sequence"""}),
            ] )
    @property
    def outputs(self ):
        '''simple docstring'''
        return OrderedDict(
            [
                ("""logits_per_image""", {0: """batch"""}),
                ("""logits_per_text""", {0: """batch"""}),
                ("""text_embeds""", {0: """batch"""}),
                ("""image_embeds""", {0: """batch"""}),
            ] )
    @property
    def atol_for_validation(self ):
        '''simple docstring'''
        return 1e-4
    def generate_dummy_inputs(self , processor , batch_size = -1 , seq_length = -1 , framework = None , ):
        '''simple docstring'''
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer , batch_size=batch_size , seq_length=seq_length , framework=framework )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor , batch_size=batch_size , framework=framework )
        return {**text_input_dict, **image_input_dict}
    @property
    def default_onnx_opset(self ):
        '''simple docstring'''
        return 14 | 356 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_focalnet'''] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 317 | 0 |
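# Sketch of what the lazy-module indirection above buys (hedged: a simplified
# stand-in for the real transformers._LazyModule, not its actual code):
# attribute access triggers the submodule import, so importing the package
# stays cheap even when torch is installed.
#
# import importlib
# class TinyLazyModule:
#     def __init__(self, name, import_structure):
#         self._name = name
#         self._attr_to_module = {a: m for m, attrs in import_structure.items() for a in attrs}
#     def __getattr__(self, attr):
#         module = importlib.import_module("." + self._attr_to_module[attr], self._name)
#         return getattr(module, attr)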
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __UpperCAmelCase ( metaclass=DummyObject ):
    _backends = ['''note_seq''']
    def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ):
        """simple docstring"""
        requires_backends(self , ['note_seq'] )
    @classmethod
    def from_config( cls , *lowerCAmelCase_ , **lowerCAmelCase_ ):
        """simple docstring"""
        requires_backends(cls , ['note_seq'] )
    @classmethod
    def from_pretrained( cls , *lowerCAmelCase_ , **lowerCAmelCase_ ):
        """simple docstring"""
        requires_backends(cls , ['note_seq'] )
| 42 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class _SCREAMING_SNAKE_CASE( PretrainedConfig ):
    model_type = '''beit'''
    def __init__( self ,vocab_size=81_92 ,hidden_size=7_68 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=30_72 ,hidden_act="gelu" ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,initializer_range=0.0_2 ,layer_norm_eps=1E-12 ,image_size=2_24 ,patch_size=16 ,num_channels=3 ,use_mask_token=False ,use_absolute_position_embeddings=False ,use_relative_position_bias=False ,use_shared_relative_position_bias=False ,layer_scale_init_value=0.1 ,drop_path_rate=0.1 ,use_mean_pooling=True ,out_indices=[3, 5, 7, 11] ,pool_scales=[1, 2, 3, 6] ,use_auxiliary_head=True ,auxiliary_loss_weight=0.4 ,auxiliary_channels=2_56 ,auxiliary_num_convs=1 ,auxiliary_concat_input=False ,semantic_loss_ignore_index=2_55 ,**kwargs ,) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
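# Quick usage sketch (hedged: the class carries the obfuscated name
# _SCREAMING_SNAKE_CASE in this dump; upstream it is BeitConfig). The defaults
# above reproduce the BEiT-base recipe, and single fields can be overridden:
#
# config = BeitConfig(image_size=384, use_auxiliary_head=False)
# assert config.hidden_size == 768 and config.patch_size == 16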
class _SCREAMING_SNAKE_CASE( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''' )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        """simple docstring"""
        return 1E-4 | 191 | 0 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class __lowerCAmelCase :
"""simple docstring"""
    def __init__( self : Tuple , lowerCAmelCase__ : list[tuple[float, float]] ) -> None:
        '''simple docstring'''
        self.list_of_points = lowerCAmelCase__
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(lowerCAmelCase__ ) - 1
    def snake_case__ ( self : Any , t : float ) -> list[float]:
        '''simple docstring'''
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i
            output_values.append(
                comb(self.degree , i ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values ) , 5 ) == 1
return output_values
    def snake_case__ ( self : int , t : float ) -> tuple[float, float]:
        '''simple docstring'''
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t )
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points ) ):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
    def snake_case__ ( self : List[str] , step_size : float = 0.01 ) -> None:
        '''simple docstring'''
        from matplotlib import pyplot as plt  # type: ignore
        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size
        x_coordinates = [i[0] for i in self.list_of_points]
        y_coordinates = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x , to_plot_y , color='''blue''' , label='''Curve of Degree ''' + str(self.degree ) , )
        plt.scatter(x_coordinates , y_coordinates , color='''red''' , label='''Control Points''' )
plt.legend()
plt.show()
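# Numeric sanity check (a sketch, not part of the original demo): for the
# degree-1 curve through (1, 1) and (2, 3), t=0.5 must average the two control
# points, and the two basis weights must each be 0.5.
#
# >>> curve = __lowerCAmelCase([(1, 1), (2, 3)])
# >>> curve.basis_function(0.5)
# [0.5, 0.5]
# >>> curve.bezier_curve_function(0.5)
# (1.5, 2.0)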
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 365 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Optional[Any] = logging.get_logger(__name__)
lowercase__ : List[Any] = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class __lowerCAmelCase ( PretrainedConfig ):
    """simple docstring"""
    model_type = 'biogpt'
    def __init__( self , vocab_size=42384 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , scale_embedding=True , use_cache=True , layerdrop=0.0 , activation_dropout=0.0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ) -> None:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 287 | 0 |
def hubble_parameter( hubble_constant : float , radiation_density : float , matter_density : float , dark_energy : float , redshift : float , ) -> float:
    '''simple docstring'''
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError("""All input parameters must be positive""" )
    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError("""Relative densities cannot be greater than one""" )
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
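# Sanity-check sketch: for a flat universe the densities sum to one, the
# curvature term vanishes, and E(z=0) = 1, so H(0) is just the Hubble constant.
#
# >>> round(hubble_parameter(hubble_constant=68.3, radiation_density=1e-4,
# ...       matter_density=0.3, dark_energy=0.7 - 1e-4, redshift=0), 5)
# 68.3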
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
UpperCAmelCase_ = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 12 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mluke'] = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 12 | 1 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def split_text( text : str , n : int=100 , character : str=" "):
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0 , len(text) , n)]
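# Worked example (sketch): with n=3 and the default separator, an 8-word string
# becomes ceil(8 / 3) = 3 passages.
#
# >>> split_text("one two three four five six seven eight", n=3)
# ['one two three', 'four five six', 'seven eight']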
def split_documents( documents : dict):
    titles , texts = [], []
    for title, text in zip(documents["title"] , documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed( documents : dict , ctx_encoder : DPRContextEncoder , ctx_tokenizer : DPRContextEncoderTokenizerFast):
    input_ids = ctx_tokenizer(
        documents["title"] , documents["text"] , truncation=True , padding="longest" , return_tensors="pt")["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device) , return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def lowercase_ ( _lowerCamelCase : "RagExampleArguments" , _lowerCamelCase : "ProcessingArguments" , _lowerCamelCase : "IndexHnswArguments" , ):
######################################
logger.info("Step 1 - Create the dataset")
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
    dataset = load_dataset(
        "csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"])
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents , batched=True , num_proc=processing_args.num_proc)
    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))})  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed , ctx_encoder=ctx_encoder , ctx_tokenizer=ctx_tokenizer) , batched=True , batch_size=processing_args.batch_size , features=new_features , )
    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset")
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings" , custom_index=index)
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
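    # Hedged usage sketch (not part of this script): the saved dataset and index
    # can be reloaded and queried with a DPR *question* encoder; the model names
    # below are illustrative.
    #
    # from datasets import load_from_disk
    # from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast
    # ds = load_from_disk(passages_path)
    # ds.load_faiss_index("embeddings", index_path)
    # q_enc = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    # q_tok = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    # q_emb = q_enc(**q_tok("what does moses' rod turn into ?", return_tensors="pt"))[0][0].detach().numpy()
    # scores, passages = ds.get_nearest_examples("embeddings", q_emb, k=5)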
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__ ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) ,metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} ,)
    question: Optional[str] = field(
        default=None ,metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} ,)
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq" ,metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} ,)
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base" ,metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        } ,)
    output_dir: Optional[str] = field(
        default=str(Path(__file__ ).parent / "test_run" / "dummy-kb" ) ,metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} ,)
@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None ,metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        } ,)
    batch_size: int = field(
        default=16 ,metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        } ,)
@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768 ,metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} ,)
    m: int = field(
        default=128 ,metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        } ,)
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args , processing_args , index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 333 | import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env( key , default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'''If set, {key} must be yes or no.''')
    return _value
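# Behaviour sketch: strtobool accepts y/yes/t/true/on/1 (-> 1) and
# n/no/f/false/off/0 (-> 0), so the RUN_SLOW flag below can be enabled with
# e.g. `RUN_SLOW=yes`.
#
# >>> os.environ["MY_FLAG"] = "yes"   # MY_FLAG is a made-up key
# >>> parse_flag_from_env("MY_FLAG")
# 1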
_run_slow_tests = parse_flag_from_env('''RUN_SLOW''', default=False)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skip("Test was skipped")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Tuple):
return unittest.skipUnless(_run_slow_tests , "test is slow")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Dict):
return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skipUnless(is_xpu_available() , "test requires a XPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(is_tpu_available() , "test requires TPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any]):
return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(is_safetensors_available() , "test requires safetensors")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : str):
return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Any):
return unittest.skipUnless(is_torch_version(">=" , "1.12.0") , "test requires torch version >= 1.12.0")(_lowerCamelCase)
def lowercase_ ( test_case=None , version=None):
    if test_case is None:
        return partial(lowercase_ , version=version)
    return unittest.skipUnless(is_torch_version(">=" , version) , f'''test requires torch version >= {version}''')(test_case)
def lowercase_ ( _lowerCamelCase : List[Any]):
return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skipUnless(is_wandb_available() , "test requires wandb")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml")(_lowerCamelCase)
_atleast_one_tracker_available = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowercase_ ( _lowerCamelCase : Any):
return unittest.skipUnless(
_atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(_lowerCamelCase)
class snake_case_ ( unittest.TestCase ):
    clear_on_setup = True
@classmethod
def __UpperCamelCase ( cls : str ) -> str:
        cls.tmpdir = tempfile.mkdtemp()
@classmethod
def __UpperCamelCase ( cls : List[str] ) -> Optional[Any]:
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __UpperCamelCase ( self : str ) -> Optional[int]:
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("**/*" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(lowercase_ )
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[Any] , lowercase_ : Union[mock.Mock, List[mock.Mock]] ) -> str:
        self.mocks = lowercase_ if isinstance(lowercase_ , (tuple, list) ) else [lowercase_]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowercase_ ( tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i] , tensor):
            return False
    return True
class _RunOutput:
    def __init__( self , returncode , stdout , stderr ):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream( stream , callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False):
    if echo:
        print("\nRunning: " , " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label , line , file=pipe)
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda l: tee(l , out , sys.stdout , label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr , lambda l: tee(l , err , sys.stderr , label="stderr:"))),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err)
def execute_subprocess_async( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True):
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo))
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            f'''The combined stderr from workers follows:\n{stderr}''')
    return result
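# Usage sketch (hypothetical command): the wrapper raises with the combined
# worker stderr on a non-zero exit code, which is why tests prefer it over a
# bare subprocess call.
#
# result = execute_subprocess_async(["python", "-c", "print('all good')"])
# assert result.stdout[0] == "all good"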
class SubprocessCallException(Exception):
    pass
def lowercase_ ( command , return_stdout=False):
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output , "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f'''Command `{" ".join(command)}` failed with the following error:\n\n{e.output.decode()}''') from e
| 333 | 1 |
def reverse_long_words( sentence: str ) -> str:
    return " ".join(
        ''.join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
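# Example (sketch): only words longer than four characters are reversed.
#
# >>> reverse_long_words("Hey wollef sroirraw")
# 'Hey fellow warriors'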
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 195 |
def mf_knapsack( i , wt , val , j ):
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1 , wt , val , j )
        else:
            val = max(
                mf_knapsack(i - 1 , wt , val , j ) , mf_knapsack(i - 1 , wt , val , j - wt[i - 1] ) + val[i - 1] , )
        f[i][j] = val
    return f[i][j]
def knapsack( w , wt , val , n ):
    dp = [[0] * (w + 1) for _ in range(n + 1 )]
    for i in range(1 , n + 1 ):
        for w_ in range(1 , w + 1 ):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp
def knapsack_with_example_solution( w , wt , val ):
    if not (isinstance(wt , (list, tuple) ) and isinstance(val , (list, tuple) )):
        raise ValueError(
            'Both the weights and values vectors must be either lists or tuples' )
    num_items = len(wt )
    if num_items != len(val ):
        msg = (
            'The number of weights must be the same as the number of values.\n'
            F'''But got {num_items} weights and {len(val )} values'''
        )
        raise ValueError(msg )
    for i in range(num_items ):
        if not isinstance(wt[i] , int ):
            msg = (
                'All weights must be integers but got weight of '
                F'''type {type(wt[i] )} at index {i}'''
            )
            raise TypeError(msg )
    optimal_val , dp_table = knapsack(w , wt , val , num_items )
    example_optional_set = set()
    _construct_solution(dp_table , wt , num_items , w , example_optional_set )
    return optimal_val, example_optional_set
def _construct_solution( dp , wt , i , j , optimal_set ):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp , wt , i - 1 , j , optimal_set )
        else:
            optimal_set.add(i )
            _construct_solution(dp , wt , i - 1 , j - wt[i - 1] , optimal_set )
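# Worked check (sketch): for w=6, wt=[4, 3, 2, 3], val=[3, 2, 4, 4] the
# backtracking above keeps item i whenever dp[i - 1][j] != dp[i][j], which
# recovers items 3 and 4 (weights 2 + 3 <= 6, value 4 + 4 = 8).
#
# >>> knapsack_with_example_solution(6, [4, 3, 2, 3], [3, 2, 4, 4])
# (8, {3, 4})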
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution , _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution , optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
print('''optimal_value = ''', optimal_solution)
print('''An optimal subset corresponding to the optimal value''', optimal_subset)
| 195 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_a : str = logging.get_logger(__name__)
_a : int = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights( fairseq_model , hf_model , is_finetuned ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == """group""" ,)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
                if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index )
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "weight" in name:
                        weight_type = """weight"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"Unused weights: {unused_weights}" )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_hubert_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path )
    else:
        config = HubertConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path ,"""vocab.json""" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path ,exist_ok=True )
            with open(vocab_path ,"""w""" ,encoding="""utf-8""" ) as vocab_handle:
                json.dump(target_dict.indices ,vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="""|""" ,do_lower_case=False ,)
            return_attention_mask = True if config.feat_extract_norm == """layer""" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 ,sampling_rate=16000 ,padding_value=0 ,do_normalize=True ,return_attention_mask=return_attention_mask ,)
            processor = WavaVecaProcessor(feature_extractor=feature_extractor ,tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = HubertForCTC(config )
    else:
        hf_wavavec = HubertModel(config )
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] ,arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    model = model[0].eval()
    recursively_load_weights(model ,hf_wavavec ,is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_a : int = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
_a : Dict = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 359 | """simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester( unittest.TestCase ):
def __init__( self , a__ , a__=7 , a__=3 , a__=30 , a__=400 , a__=True , a__=None , a__=0.9 , a__=None , a__=True , a__=[0.5, 0.5, 0.5] , a__=[0.5, 0.5, 0.5] , ):
_lowerCAmelCase : int = size if size is not None else {"""shortest_edge""": 30}
_lowerCAmelCase : Dict = crop_size if crop_size is not None else {"""height""": 30, """width""": 30}
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : Optional[int] = batch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : str = min_resolution
_lowerCAmelCase : Dict = max_resolution
_lowerCAmelCase : str = do_resize_and_center_crop
_lowerCAmelCase : List[str] = size
_lowerCAmelCase : int = crop_pct
_lowerCAmelCase : int = crop_size
_lowerCAmelCase : Union[str, Any] = do_normalize
_lowerCAmelCase : Tuple = image_mean
_lowerCAmelCase : Optional[Any] = image_std
    def prepare_image_processor_dict( self ):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __A ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = PoolFormerImageProcessingTester(self )
@property
    def image_processor_dict( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """do_resize_and_center_crop""" ) )
        self.assertTrue(hasattr(image_processing , """size""" ) )
        self.assertTrue(hasattr(image_processing , """crop_pct""" ) )
        self.assertTrue(hasattr(image_processing , """do_normalize""" ) )
        self.assertTrue(hasattr(image_processing , """image_mean""" ) )
        self.assertTrue(hasattr(image_processing , """image_std""" ) )
def __A ( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 30} )
        self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
        self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def __A ( self ):
pass
def __A ( self ):
# Initialize image_processing
_lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , Image.Image )
# Test not batched input
_lowerCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase : List[str] = image_processing(a__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __A ( self ):
# Initialize image_processing
_lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , numpify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , np.ndarray )
# Test not batched input
_lowerCAmelCase : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase : int = image_processing(a__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __A ( self ):
# Initialize image_processing
_lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , torchify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , torch.Tensor )
# Test not batched input
_lowerCAmelCase : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase : List[str] = image_processing(a__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 126 | 0 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm( main_process_only = True , *args , **kwargs ):
    if not is_tqdm_available():
        raise ImportError('Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.' )
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
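# Usage sketch (`dataloader` is a stand-in for any iterable): with the default
# main_process_only=True only local rank 0 renders the bar in a distributed
# run; every other rank gets a disabled bar.
#
# for batch in tqdm(dataloader, desc="training"):
#     ...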
| 143 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file( tokenizer_name , data_dir , max_source_length=1024 , max_target_length=1024 , consider_target=False , **kwargs ) -> None:
    tok = AutoTokenizer.from_pretrained(tokenizer_name )
    train_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path='train' , **kwargs )
    pad = tok.pad_token_id
    def get_lens(ds ):
        dl = tqdm(
            DataLoader(ds , batch_size=512 , num_workers=8 , shuffle=False , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
        max_lens = []
        for batch in dl:
            src_lens = batch['input_ids'].ne(pad ).sum(1 ).tolist()
            tgt_lens = batch['labels'].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens , tgt_lens ):
                    max_lens.append(max(src , tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens
    train_lens = get_lens(train_ds )
    val_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path='val' , **kwargs )
    val_lens = get_lens(val_ds )
    pickle_save(train_lens , train_ds.len_file )
    pickle_save(val_lens , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 89 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCamelCase__ ( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = KandinskyVaaInpaintPipeline
lowerCamelCase = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
lowerCamelCase = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
lowerCamelCase = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
lowerCamelCase = False
@property
def _lowerCAmelCase ( self ) -> Union[str, Any]:
return 32
@property
def _lowerCAmelCase ( self ) -> Optional[int]:
return 32
@property
def _lowerCAmelCase ( self ) -> List[Any]:
return self.time_input_dim
@property
def _lowerCAmelCase ( self ) -> Tuple:
return self.time_input_dim * 4
@property
def _lowerCAmelCase ( self ) -> int:
return 1_00
@property
def _lowerCAmelCase ( self ) -> Any:
torch.manual_seed(0 )
_lowerCAmelCase ={
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_lowerCAmelCase =UNetaDConditionModel(**__UpperCAmelCase )
return model
@property
def _lowerCAmelCase ( self ) -> Optional[int]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _lowerCAmelCase ( self ) -> List[str]:
torch.manual_seed(0 )
_lowerCAmelCase =VQModel(**self.dummy_movq_kwargs )
return model
def _lowerCAmelCase ( self ) -> Dict:
_lowerCAmelCase =self.dummy_unet
_lowerCAmelCase =self.dummy_movq
_lowerCAmelCase =DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__UpperCAmelCase , )
_lowerCAmelCase ={
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ) -> Optional[Any]:
_lowerCAmelCase =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
_lowerCAmelCase =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__UpperCAmelCase )
# create init_image
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
_lowerCAmelCase =image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create mask
_lowerCAmelCase =np.ones((64, 64) , dtype=np.floataa )
_lowerCAmelCase =0
if str(__UpperCAmelCase ).startswith("""mps""" ):
_lowerCAmelCase =torch.manual_seed(__UpperCAmelCase )
else:
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
_lowerCAmelCase ={
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase ="""cpu"""
_lowerCAmelCase =self.get_dummy_components()
_lowerCAmelCase =self.pipeline_class(**__UpperCAmelCase )
_lowerCAmelCase =pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase =pipe(**self.get_dummy_inputs(__UpperCAmelCase ) )
_lowerCAmelCase =output.images
_lowerCAmelCase =pipe(
**self.get_dummy_inputs(__UpperCAmelCase ) , return_dict=__UpperCAmelCase , )[0]
_lowerCAmelCase =image[0, -3:, -3:, -1]
_lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1]
print(f'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase =np.array(
[0.5_0_7_7_5_9_0_3, 0.4_9_5_2_7_1_9_5, 0.4_8_8_2_4_5_4_3, 0.5_0_1_9_2_2_3_7, 0.4_8_6_4_4_9_0_6, 0.4_9_3_7_3_8_1_4, 0.4_7_8_0_5_9_8, 0.4_7_2_3_4_8_2_7, 0.4_8_3_2_7_8_4_8] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def _lowerCAmelCase ( self ) -> str:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ) -> Dict:
_lowerCAmelCase =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" )
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_lowerCAmelCase =np.ones((7_68, 7_68) , dtype=np.floataa )
_lowerCAmelCase =0
_lowerCAmelCase ="""a hat"""
_lowerCAmelCase =KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__UpperCAmelCase )
_lowerCAmelCase =KandinskyVaaInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.floataa )
_lowerCAmelCase =pipeline.to(__UpperCAmelCase )
pipeline.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase =torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase , _lowerCAmelCase =pipe_prior(
__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_lowerCAmelCase =pipeline(
image=__UpperCAmelCase , mask_image=__UpperCAmelCase , image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="""np""" , )
_lowerCAmelCase =output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
| 341 |
"""simple docstring"""
def count_inversions_bf(arr) -> int:
    # Brute force: check every pair (i, j) with i < j.
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive(arr):
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def _count_cross_inversions(p, q):
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion
def main() -> None:
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)
    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
main()
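# Worked example: for [3, 1, 2] the out-of-order pairs are (3, 1) and (3, 2),
# so count_inversions_bf([3, 1, 2]) == 2 and
# count_inversions_recursive([3, 1, 2]) == ([1, 2, 3], 2). The brute force is
# O(n^2); the recursive version does O(n) merge work per O(log n) level.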
| 341 | 1 |
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : Optional[Any] = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"
    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}'
            )
        super().__init__(**kwargs)
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
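# Worked example with the defaults above: hop_length = prod([8, 5, 4, 2]) = 320
# input samples per frame, so frame_rate = ceil(24_000 / 320) = 75 frames per
# second, and num_quantizers = int(1000 * 24.0 // (75 * 10)) = 32 codebooks at
# the highest target bandwidth of 24 kbps.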
| 80 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
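# Minimal usage sketch (assuming this builder is the packaged "pandas" module
# of `datasets`; the file name "train.pkl" below is hypothetical):
#
#     import pandas as pd
#     from datasets import load_dataset
#
#     pd.DataFrame({"a": [1, 2], "b": ["x", "y"]}).to_pickle("train.pkl")
#     ds = load_dataset("pandas", data_files={"train": "train.pkl"})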
| 258 | 0 |
"""simple docstring"""
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
    def run(self):
        hub_version = huggingface_hub.__version__
        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__
        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__
        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__
        info = {
'`diffusers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'PyTorch version (GPU?)': F'{pt_version} ({pt_cuda_available})',
'Huggingface_hub version': hub_version,
'Transformers version': transformers_version,
'Accelerate version': accelerate_version,
'xFormers version': xformers_version,
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
        print(self.format_dict(info))
return info
    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 80 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[int] =["""SpeechEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[int] =["""FlaxSpeechEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 80 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Optional[Any]):
lowerCAmelCase_ : List[str] = tempfile.mkdtemp()
lowerCAmelCase_ : Any = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowerCAmelCase_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
lowerCAmelCase_ : Tuple = {
"do_resize": True,
"size": 2_0,
"do_center_crop": True,
"crop_size": 1_8,
"do_normalize": True,
"image_mean": [0.4814_5466, 0.457_8275, 0.4082_1073],
"image_std": [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
lowerCAmelCase_ : Any = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE__)
with open(self.image_processor_file , '''w''' , encoding='''utf-8''') as fp:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
def UpperCAmelCase__ ( self : List[str] , **A_ : Dict):
return BertTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__)
def UpperCAmelCase__ ( self : Union[str, Any] , **A_ : Tuple):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__)
def UpperCAmelCase__ ( self : Tuple , **A_ : int):
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__)
def UpperCAmelCase__ ( self : Dict):
shutil.rmtree(self.tmpdirname)
def UpperCAmelCase__ ( self : Optional[int]):
lowerCAmelCase_ : Dict = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta)]
lowerCAmelCase_ : Optional[Any] = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1)) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self : Any):
lowerCAmelCase_ : Optional[Any] = self.get_tokenizer()
lowerCAmelCase_ : List[Any] = self.get_rust_tokenizer()
lowerCAmelCase_ : Union[str, Any] = self.get_image_processor()
lowerCAmelCase_ : List[Any] = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__)
processor_slow.save_pretrained(self.tmpdirname)
lowerCAmelCase_ : Dict = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE__)
lowerCAmelCase_ : List[str] = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__)
processor_fast.save_pretrained(self.tmpdirname)
lowerCAmelCase_ : List[Any] = AlignProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE__)
self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE__)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE__)
self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE__)
def UpperCAmelCase__ ( self : Optional[Any]):
lowerCAmelCase_ : List[str] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
lowerCAmelCase_ : List[Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
lowerCAmelCase_ : Union[str, Any] = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0)
lowerCAmelCase_ : Union[str, Any] = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE__)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__)
def UpperCAmelCase__ ( self : Optional[Any]):
lowerCAmelCase_ : Any = self.get_image_processor()
lowerCAmelCase_ : Tuple = self.get_tokenizer()
lowerCAmelCase_ : List[str] = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__)
lowerCAmelCase_ : Optional[Any] = self.prepare_image_inputs()
lowerCAmelCase_ : Optional[int] = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''np''')
lowerCAmelCase_ : List[Any] = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''np''')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2)
def UpperCAmelCase__ ( self : List[Any]):
lowerCAmelCase_ : str = self.get_image_processor()
lowerCAmelCase_ : List[Any] = self.get_tokenizer()
lowerCAmelCase_ : int = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__)
lowerCAmelCase_ : List[str] = "lower newer"
lowerCAmelCase_ : List[str] = processor(text=SCREAMING_SNAKE_CASE__)
lowerCAmelCase_ : List[str] = tokenizer(SCREAMING_SNAKE_CASE__ , padding='''max_length''' , max_length=6_4)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def UpperCAmelCase__ ( self : List[str]):
lowerCAmelCase_ : Optional[Any] = self.get_image_processor()
lowerCAmelCase_ : List[str] = self.get_tokenizer()
lowerCAmelCase_ : Union[str, Any] = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__)
lowerCAmelCase_ : Optional[int] = "lower newer"
lowerCAmelCase_ : Union[str, Any] = self.prepare_image_inputs()
lowerCAmelCase_ : int = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__)
self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''])
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE__):
processor()
def UpperCAmelCase__ ( self : Dict):
lowerCAmelCase_ : Any = self.get_image_processor()
lowerCAmelCase_ : Tuple = self.get_tokenizer()
lowerCAmelCase_ : Optional[Any] = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__)
lowerCAmelCase_ : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase_ : Optional[int] = processor.batch_decode(SCREAMING_SNAKE_CASE__)
lowerCAmelCase_ : Optional[int] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__)
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
def UpperCAmelCase__ ( self : Tuple):
lowerCAmelCase_ : int = self.get_image_processor()
lowerCAmelCase_ : Tuple = self.get_tokenizer()
lowerCAmelCase_ : int = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__)
lowerCAmelCase_ : Union[str, Any] = "lower newer"
lowerCAmelCase_ : List[str] = self.prepare_image_inputs()
lowerCAmelCase_ : Tuple = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
| 103 |
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def get_module_path(test_file):
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_classes(test_file):
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        test_class = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(test_class)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)
def get_model_tester_from_test_class(test_class):
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_to_tester_mapping(test_file):
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
| 139 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
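# Worked example: a 10 mH inductor at 50 Hz gives
#   X_L = 2 * pi * f * L = 2 * pi * 50 * 0.01 = pi ≈ 3.1416 ohms,
# so ind_reactance(inductance=0.01, frequency=50, reactance=0) returns
# {"reactance": 3.141592653589793}. Passing a nonzero reactance with
# inductance=0 inverts the same relation to recover L.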
| 356 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 | 0 |
def min_path_sum(grid: list) -> int:
    # Dynamic programming: accumulate, in place, the cheapest cost of reaching
    # each cell moving only right or down; the answer ends up in the last cell.
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]
    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
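# Worked example: for grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the cheapest
# right/down path is 1 -> 3 -> 1 -> 1 -> 1, so
# min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) returns 7. Note the function
# accumulates sums in place, mutating the grid it is given.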
| 39 |
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: '''Sunday''',
1: '''Monday''',
2: '''Tuesday''',
3: '''Wednesday''',
4: '''Thursday''',
5: '''Friday''',
6: '''Saturday''',
}
def get_week_day(year: int, month: int, day: int) -> str:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
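# Worked example: get_week_day(2023, 1, 1) -> "Sunday". For 2023,
# century_anchor = 2, dooms_day = (1 + 11 + 2 + 2) % 7 = 2 (Tuesday), the
# January anchor in a common year is 3, and (2 + 1 - 3) % 7 = 0 maps to
# WEEK_DAY_NAMES[0] = "Sunday".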
if __name__ == "__main__":
import doctest
    doctest.testmod()
 | 282 | 0 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[str] , _a : List[str] , _a : Tuple=13 , _a : List[Any]=7 , _a : Optional[Any]=True , _a : Dict=True , _a : str=True , _a : Tuple=True , _a : List[Any]=99 , _a : Union[str, Any]=32 , _a : Optional[Any]=5 , _a : str=4 , _a : Optional[Any]=37 , _a : Any="gelu" , _a : Tuple=0.1 , _a : List[Any]=0.1 , _a : int=512 , _a : Union[str, Any]=16 , _a : str=2 , _a : Any=0.02 , _a : Any=4 , ):
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_attention_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = num_choices
def A_ ( self : List[Any] ):
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_attention_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def A_ ( self : List[str] ):
UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class __lowercase ( A, unittest.TestCase ):
'''simple docstring'''
_A : List[str] = True
_A : Dict = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def A_ ( self : Optional[Any] ):
UpperCamelCase__ = FlaxRoFormerModelTester(self )
@slow
def A_ ( self : Optional[Any] ):
for model_class_name in self.all_model_classes:
UpperCamelCase__ = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''' , from_pt=_a )
UpperCamelCase__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(_a )
@require_flax
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def A_ ( self : Optional[int] ):
UpperCamelCase__ = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
UpperCamelCase__ = jnp.array([[0, 1, 2, 3, 4, 5]] )
UpperCamelCase__ = model(_a )[0]
UpperCamelCase__ = 50_000
UpperCamelCase__ = (1, 6, vocab_size)
self.assertEqual(output.shape , _a )
UpperCamelCase__ = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , _a , atol=1E-4 ) )
 | 35 |
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0
    operations = {"+", "-", "*", "/"}
    stack = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division that truncates toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
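# Worked example: evaluate_postfix(["2", "3", "+", "4", "*"]) computes
# (2 + 3) * 4 = 20: 2 and 3 are pushed, "+" replaces them with 5, 4 is
# pushed, and "*" pops 5 and 4 to leave 20 on the stack.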
| 35 | 1 |
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
_lowerCamelCase : Any = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = "Hello world! cécé herlolip"
def __lowerCamelCase ( A__ , A__ , A__ ) -> str:
"""simple docstring"""
UpperCamelCase = FairseqRobertaModel.from_pretrained(A__ )
roberta.eval() # disable dropout
UpperCamelCase = roberta.model.encoder.sentence_encoder
UpperCamelCase = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
if classification_head:
UpperCamelCase = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our RoBERTa config:' , A__ )
UpperCamelCase = XLMRobertaXLForSequenceClassification(A__ ) if classification_head else XLMRobertaXLForMaskedLM(A__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
UpperCamelCase = roberta_sent_encoder.embed_tokens.weight
UpperCamelCase = roberta_sent_encoder.embed_positions.weight
UpperCamelCase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
UpperCamelCase = roberta_sent_encoder.layer_norm.weight
UpperCamelCase = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
UpperCamelCase = model.roberta.encoder.layer[i]
UpperCamelCase = roberta_sent_encoder.layers[i]
UpperCamelCase = layer.attention
UpperCamelCase = roberta_layer.self_attn_layer_norm.weight
UpperCamelCase = roberta_layer.self_attn_layer_norm.bias
# self attention
UpperCamelCase = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
UpperCamelCase = roberta_layer.self_attn.q_proj.weight
UpperCamelCase = roberta_layer.self_attn.q_proj.bias
UpperCamelCase = roberta_layer.self_attn.k_proj.weight
UpperCamelCase = roberta_layer.self_attn.k_proj.bias
UpperCamelCase = roberta_layer.self_attn.v_proj.weight
UpperCamelCase = roberta_layer.self_attn.v_proj.bias
# self-attention output
UpperCamelCase = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
UpperCamelCase = roberta_layer.self_attn.out_proj.weight
UpperCamelCase = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
UpperCamelCase = roberta_layer.final_layer_norm.weight
UpperCamelCase = roberta_layer.final_layer_norm.bias
# intermediate
UpperCamelCase = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
UpperCamelCase = roberta_layer.fca.weight
UpperCamelCase = roberta_layer.fca.bias
# output
UpperCamelCase = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
UpperCamelCase = roberta_layer.fca.weight
UpperCamelCase = roberta_layer.fca.bias
# end of layer
if classification_head:
UpperCamelCase = roberta.model.classification_heads['mnli'].dense.weight
UpperCamelCase = roberta.model.classification_heads['mnli'].dense.bias
UpperCamelCase = roberta.model.classification_heads['mnli'].out_proj.weight
UpperCamelCase = roberta.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
UpperCamelCase = roberta.model.encoder.lm_head.dense.weight
UpperCamelCase = roberta.model.encoder.lm_head.dense.bias
UpperCamelCase = roberta.model.encoder.lm_head.layer_norm.weight
UpperCamelCase = roberta.model.encoder.lm_head.layer_norm.bias
UpperCamelCase = roberta.model.encoder.lm_head.weight
UpperCamelCase = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
UpperCamelCase = roberta.encode(A__ ).unsqueeze(0 ) # batch of size 1
UpperCamelCase = model(A__ )[0]
if classification_head:
UpperCamelCase = roberta.model.classification_heads['mnli'](roberta.extract_features(A__ ) )
else:
UpperCamelCase = roberta.model(A__ )[0]
print(our_output.shape , their_output.shape )
UpperCamelCase = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
UpperCamelCase = torch.allclose(A__ , A__ , atol=1e-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
pathlib.Path(A__ ).mkdir(parents=A__ , exist_ok=A__ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(A__ )
if __name__ == "__main__":
_lowerCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
_lowerCamelCase : Any = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 28 |
import pprint
import requests
a__ = """https://zenquotes.io/api"""
def lowercase ( ) -> list:
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def lowercase ( ) -> list:
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
a__ = random_quotes()
pprint.pprint(response)
| 317 | 0 |
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
if __name__ == "__main__":
    def f(x: float) -> float:
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
while i <= 100_000:
print(F'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
i *= 10
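# Worked check: the trapezoidal rule approximates the integral as the sum over
# segments of (f(x1) + f(x2)) / 2 * (x2 - x1), so
# trapezoidal_area(lambda x: x * x, 0, 1, 1000) returns roughly 0.3333335,
# slightly above the exact value 1/3 because x^2 is convex and trapezoids
# overestimate convex integrands.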
| 216 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums
def validate(n: int) -> bool:
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
print(F'''{sum(compute_truncated_primes(11)) = }''')
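# Worked example: 3797 stays prime under truncation from both ends
# (3797, 797, 97, 7 and 379, 37, 3 are all prime), so it appears in
# compute_truncated_primes(11). Summing all eleven such primes gives 748317
# (Project Euler problem 37).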
| 216 | 1 |
"""simple docstring"""
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
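# Sanity check: format_time(3661) returns "1:01:01", while format_time(75)
# returns "01:15" because the hour component is dropped when it is zero.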
def html_progress_bar(value, total, prefix, label, width=300):
    # `label` is shown next to the bar; `width` is in pixels.
    return f"\n <div>\n {prefix}\n <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>\n {label}\n </div>\n "
def text_to_html_table(items):
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n    <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class __lowerCamelCase :
'''simple docstring'''
A_ : List[Any] = 5
A_ : Dict = 0.2
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = 300 , ) -> Tuple:
_a = total
_a = """""" if prefix is None else prefix
_a = leave
_a = parent
_a = width
_a = None
_a = None
_a = None
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = None ) -> Tuple:
_a = value
if comment is not None:
_a = comment
if self.last_value is None:
_a = time.time()
_a = value
_a = None
_a = self.warmup
_a = 1
self.update_bar(__UpperCAmelCase )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
_a = time.time()
_a = current_time - self.start_time
# We could have value = self.start_value if the update is called twixe with the same start value.
if value > self.start_value:
_a = self.elapsed_time / (value - self.start_value)
else:
_a = None
if value >= self.total:
_a = self.total
_a = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
_a = self.average_time_per_item * (self.total - value)
self.update_bar(__UpperCAmelCase )
_a = value
_a = current_time
if self.average_time_per_item is None:
_a = 1
else:
_a = max(int(self.update_every / self.average_time_per_item ) , 1 )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None ) -> List[str]:
_a = """ """ * (len(str(self.total ) ) - len(str(__UpperCAmelCase ) )) + str(__UpperCAmelCase )
if self.elapsed_time is None:
_a = F'[{spaced_value}/{self.total} : < :'
elif self.predicted_remaining is None:
_a = F'[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'
else:
_a = (
F'[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'
F' {format_time(self.predicted_remaining )}'
)
self.label += F', {1/self.average_time_per_item:.2f} it/s'
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F', {self.comment}]'
self.display()
def _UpperCAmelCase ( self ) -> List[Any]:
_a = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
_a = disp.display(disp.HTML(self.html_code ) , display_id=__UpperCAmelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
def _UpperCAmelCase ( self ) -> Tuple:
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class __lowerCamelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None ) -> Optional[int]:
super().__init__(__UpperCAmelCase )
_a = None if column_names is None else [column_names]
_a = None
def _UpperCAmelCase ( self ) -> str:
_a = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
_a = disp.display(disp.HTML(self.html_code ) , display_id=__UpperCAmelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> List[Any]:
if self.inner_table is None:
_a = [list(values.keys() ), list(values.values() )]
else:
_a = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(__UpperCAmelCase )
_a = columns
self.inner_table.append([values[c] for c in columns] )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=300 ) -> Union[str, Any]:
_a = NotebookProgressBar(__UpperCAmelCase , prefix=__UpperCAmelCase , parent=self , width=__UpperCAmelCase )
return self.child_bar
def _UpperCAmelCase ( self ) -> Tuple:
_a = None
self.display()
class __lowerCamelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self ) -> Union[str, Any]:
_a = None
_a = None
_a = False
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> str:
_a = """Epoch""" if args.evaluation_strategy == IntervalStrategy.EPOCH else """Step"""
_a = 0
_a = 0
_a = [self.first_column] + ["""Training Loss"""]
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
_a = NotebookTrainingTracker(state.max_steps , __UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> str:
_a = int(state.epoch ) if int(state.epoch ) == state.epoch else F'{state.epoch:.2f}'
self.training_tracker.update(
state.global_step + 1 , comment=F'Epoch {epoch}/{state.num_train_epochs}' , force_update=self._force_next_update , )
_a = False
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase ) -> List[Any]:
if not has_length(__UpperCAmelCase ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
_a = self.training_tracker.add_child(len(__UpperCAmelCase ) )
else:
_a = NotebookProgressBar(len(__UpperCAmelCase ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> Any:
if self.prediction_bar is not None:
self.prediction_bar.close()
_a = None
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase ) -> str:
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
_a = {"""Training Loss""": logs["""loss"""]}
# First column is necessarily Step sine we're not in epoch eval strategy
_a = state.global_step
self.training_tracker.write_line(__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Dict:
if self.training_tracker is not None:
_a = {"""Training Loss""": """No log""", """Validation Loss""": """No log"""}
for log in reversed(state.log_history ):
if "loss" in log:
_a = log["""loss"""]
break
if self.first_column == "Epoch":
_a = int(state.epoch )
else:
_a = state.global_step
_a = """eval"""
for k in metrics:
if k.endswith('''_loss''' ):
_a = re.sub(r'''\_loss$''' , '''''' , __UpperCAmelCase )
_a = metrics.pop('''total_flos''' , __UpperCAmelCase )
_a = metrics.pop('''epoch''' , __UpperCAmelCase )
_a = metrics.pop(F'{metric_key_prefix}_runtime' , __UpperCAmelCase )
_a = metrics.pop(F'{metric_key_prefix}_samples_per_second' , __UpperCAmelCase )
_a = metrics.pop(F'{metric_key_prefix}_steps_per_second' , __UpperCAmelCase )
_a = metrics.pop(F'{metric_key_prefix}_jit_compilation_time' , __UpperCAmelCase )
for k, v in metrics.items():
if k == F'{metric_key_prefix}_loss':
_a = v
else:
_a = k.split('''_''' )
_a = """ """.join([part.capitalize() for part in splits[1:]] )
_a = v
self.training_tracker.write_line(__UpperCAmelCase )
self.training_tracker.remove_child()
_a = None
# Evaluation takes a long time so we should force the next update.
_a = True
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
self.training_tracker.update(
state.global_step , comment=F'Epoch {int(state.epoch )}/{state.num_train_epochs}' , force_update=__UpperCAmelCase )
_a = None | 320 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCamelCase =logging.get_logger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = ["""pixel_values"""]
def __init__( self , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , __magic_name__ = PILImageResampling.BILINEAR , __magic_name__ = True , __magic_name__ = 1 / 2_5_5 , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : Dict = size if size is not None else {"""shortest_edge""": 3_8_4}
lowerCamelCase : Tuple = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : Dict = do_resize
lowerCamelCase : List[Any] = size
# Default value set here for backwards compatibility where the value in config is None
lowerCamelCase : Any = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
lowerCamelCase : Union[str, Any] = resample
lowerCamelCase : str = do_rescale
lowerCamelCase : Union[str, Any] = rescale_factor
lowerCamelCase : Tuple = do_normalize
lowerCamelCase : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = PILImageResampling.BICUBIC , __magic_name__ = None , **__magic_name__ , ):
lowerCamelCase : Union[str, Any] = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
if "shortest_edge" not in size:
raise ValueError(F'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
lowerCamelCase : str = size["""shortest_edge"""]
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
lowerCamelCase : List[str] = int(shortest_edge / crop_pct )
lowerCamelCase : Optional[Any] = get_resize_output_image_size(__magic_name__ , size=__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : Optional[int] = resize(image=__magic_name__ , size=__magic_name__ , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__magic_name__ , size=(shortest_edge, shortest_edge) , data_format=__magic_name__ , **__magic_name__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__magic_name__ , size=(shortest_edge, shortest_edge) , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = ChannelDimension.FIRST , **__magic_name__ , ):
lowerCamelCase : str = do_resize if do_resize is not None else self.do_resize
lowerCamelCase : Optional[Any] = crop_pct if crop_pct is not None else self.crop_pct
lowerCamelCase : Optional[int] = resample if resample is not None else self.resample
lowerCamelCase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase : Optional[Any] = image_mean if image_mean is not None else self.image_mean
lowerCamelCase : Tuple = image_std if image_std is not None else self.image_std
lowerCamelCase : Dict = size if size is not None else self.size
lowerCamelCase : str = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : List[str] = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowerCamelCase : Optional[Any] = [to_numpy_array(__magic_name__ ) for image in images]
if do_resize:
lowerCamelCase : List[Any] = [self.resize(image=__magic_name__ , size=__magic_name__ , crop_pct=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_rescale:
lowerCamelCase : Union[str, Any] = [self.rescale(image=__magic_name__ , scale=__magic_name__ ) for image in images]
if do_normalize:
lowerCamelCase : List[Any] = [self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) for image in images]
lowerCamelCase : Optional[int] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
lowerCamelCase : List[str] = {"""pixel_values""": images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
| 287 | 0 |
"""simple docstring"""
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=' ')
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
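# Editorial note: sudoku() mutates the grid in place, so the loop above ends up
# printing the solved grid rather than the original. A hedged variant that honors
# the "make a copy" comment (standard library only):
#
#   from copy import deepcopy
#   solved = sudoku(deepcopy(initial_grid))  # initial_grid stays unmodified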
| 38 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)
    def test_config(self):
self.config_tester.run_common_tests()
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
    def test_resize_token_embeddings(self):
super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
'''simple docstring'''
@slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
@slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        tf.random.set_seed(0)
        tokenized = tokenizer('Today is a nice day and', return_tensors='tf')
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(':/CPU:0'):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        EXPECTED_OUTPUT_STR = (
            'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
@slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
        tokenizer.padding_side = 'left'
        # use different length sentences to test batching
        sentences = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When',
            'Hello, my dog is a little',
        ]
        inputs = tokenizer(sentences, return_tensors='tf', padding=True)
        input_ids = inputs['input_ids']
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs['attention_mask'], max_new_tokens=12)
        inputs_non_padded = tokenizer(sentences[0], return_tensors='tf').input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)
        inputs_padded = tokenizer(sentences[1], return_tensors='tf').input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
            'a single',
            'Hello, my dog is a little bit of a shy one, but he is very friendly',
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
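    # Editorial note on the left padding exercised above (illustrative, not from the
    # test file): decoder-only models continue generating from the last position of
    # the prompt, so shorter prompts in a batch must be padded on the left; padding
    # on the right would make the model continue from pad tokens. A minimal sketch:
    #
    #   tokenizer.padding_side = 'left'
    #   batch = tokenizer(['short prompt', 'a much longer prompt'], padding=True, return_tensors='tf')
    #   out = model.generate(**batch, max_new_tokens=8)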
| 38 | 1 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n=100, character=" ") -> List[str]:
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
def split_documents(documents: dict) -> dict:
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(rag_example_args, processing_args, index_hnsw_args):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)
    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )
    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset
    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )
@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )
@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
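# Editorial, standalone sketch of the Step 2 indexing above (assumes `faiss-cpu`
# and `numpy` are installed; the vectors are random stand-ins for DPR embeddings):
#
#   import faiss
#   import numpy as np
#
#   d, m = 768, 128  # same roles as IndexHnswArguments.d and IndexHnswArguments.m
#   passages = np.random.rand(1000, d).astype("float32")
#   hnsw = faiss.IndexHNSWFlat(d, m, faiss.METRIC_INNER_PRODUCT)
#   hnsw.add(passages)
#   scores, ids = hnsw.search(passages[:1], 5)  # 5 approximate nearest passages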
| 333 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
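# Editorial sketch of the end-user pattern these tests validate (assumes model,
# optimizer and dataloader were already passed through accelerator.prepare):
#
#   for batch in dataloader:
#       with accelerator.accumulate(model):
#           loss = loss_fn(model(batch["x"]), batch["y"])
#           accelerator.backward(loss)  # grads sync only on accumulation boundaries
#           optimizer.step()
#           optimizer.zero_grad()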
| 333 | 1 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))
        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )
        assert np.abs(expected_image - image).max() < 9e-2
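    # Editorial note (not from the test): `strength` sets how much of the denoising
    # schedule is re-run on top of the init image -- values near 1.0 discard most of
    # the input, small values keep it largely intact, and 0.6 is a middle ground.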
| 370 |
'''simple docstring'''
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
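# Editorial usage sketch (hypothetical grid, not from the original file):
#
#   g = Graph(3, 3, [[1, 1, 0], [0, 0, 0], [0, 0, 1]])
#   print(g.count_islands())  # -> 2 (8-directional connectivity)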
| 331 | 0 |
'''simple docstring'''
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'{max_subarray_sum(nums) = }')
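    # Editorial note: for the list above this prints `max_subarray_sum(nums) = 6`,
    # the sum of the maximal contiguous slice [4, -1, 2, 1] (Kadane's algorithm).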
| 41 |
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
lowerCAmelCase = """true"""
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased')
    dataset = load_dataset('glue', 'mrpc', split='validation')

    def tokenize_function(examples):
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'],
        )
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding='longest', return_tensors='pt')
        return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        'hf-internal-testing/mrpc-bert-base-cased', return_dict=True)
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"""Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"""
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load('glue', 'mrpc')
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup['no']
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch['labels'])
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup['ddp']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch['labels']
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('**Testing gather_for_metrics**')
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test torch metrics**')
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test last batch is not dropped when perfectly divisible**')
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
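# Editorial sketch of the pattern under test (names assume an already prepared
# model/dataloader): gather_for_metrics drops the duplicate samples that
# distributed samplers pad onto the last batch, so a metric sees exactly
# len(dataset) rows:
#
#   preds, refs = accelerator.gather_for_metrics((preds, batch["labels"]))
#   metric.add_batch(predictions=preds, references=refs)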
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 126 | 0 |
"""simple docstring"""
def __get_demo_graph(index):
"""simple docstring"""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
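# Editorial usage sketch: list the bridges of the first demo graph above.
#
#   print(compute_bridges(__get_demo_graph(0)))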
if __name__ == "__main__":
import doctest
doctest.testmod()
| 353 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}
    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
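# Editorial usage sketch (mirrors the standard transformers config pattern; assumes
# RwkvModel is available from the same library):
#
#   config = RwkvConfig(context_length=2048)
#   model = RwkvModel(config)  # randomly initialized with this configuration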
| 318 | 0 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
def __lowercase ( self ):
"""simple docstring"""
super().setUp()
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['word_shape_file'])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['word_pronunciation_file'])

        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        with open(self.word_shape_file, 'w', encoding='utf-8') as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, 'w', encoding='utf-8') as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
def __lowercase ( self ):
"""simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize('你好[SEP]你是谁')
        self.assertListEqual(tokens, ['你', '好', '[SEP]', '你', '是', '谁'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Dict =RoCBertBasicTokenizer(do_lower_case=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] =RoCBertBasicTokenizer(do_lower_case=lowerCamelCase__ , strip_accents=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Tuple =RoCBertBasicTokenizer(do_lower_case=lowerCamelCase__ , strip_accents=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =RoCBertBasicTokenizer(do_lower_case=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[str] =RoCBertBasicTokenizer(do_lower_case=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : str =RoCBertBasicTokenizer(do_lower_case=lowerCamelCase__ , strip_accents=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =RoCBertBasicTokenizer(do_lower_case=lowerCamelCase__ , strip_accents=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =RoCBertBasicTokenizer(do_lower_case=lowerCamelCase__ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[str] =['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__UpperCamelCase : Any ={}
for i, token in enumerate(lowerCamelCase__ ):
__UpperCamelCase : Any =i
__UpperCamelCase : Optional[int] =RoCBertWordpieceTokenizer(vocab=lowerCamelCase__ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def __lowercase ( self ):
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def __lowercase ( self ):
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def __lowercase ( self ):
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Any =self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
if self.test_rust_tokenizer:
__UpperCamelCase : Optional[int] =self.get_rust_tokenizer()
self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
def __lowercase ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
__UpperCamelCase : List[Any] =f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
__UpperCamelCase : str =tokenizer_r.encode_plus(
lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , )
__UpperCamelCase : int =tokenizer_r.do_lower_case if hasattr(lowerCamelCase__ , 'do_lower_case' ) else False
__UpperCamelCase : List[Any] =(
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Dict =['的', '人', '有']
__UpperCamelCase : List[Any] =''.join(lowerCamelCase__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase : Dict =True
__UpperCamelCase : List[str] =self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
__UpperCamelCase : List[Any] =self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
__UpperCamelCase : Optional[Any] =tokenizer_p.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
__UpperCamelCase : int =tokenizer_r.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
__UpperCamelCase : List[str] =tokenizer_r.convert_ids_to_tokens(lowerCamelCase__ )
__UpperCamelCase : List[str] =tokenizer_p.convert_ids_to_tokens(lowerCamelCase__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Optional[int] =False
__UpperCamelCase : List[str] =self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
__UpperCamelCase : Tuple =self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
__UpperCamelCase : List[str] =tokenizer_r.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
__UpperCamelCase : List[str] =tokenizer_p.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
__UpperCamelCase : Dict =tokenizer_r.convert_ids_to_tokens(lowerCamelCase__ )
__UpperCamelCase : int =tokenizer_p.convert_ids_to_tokens(lowerCamelCase__ )
# it is expected that only the first Chinese character is not preceded by "##".
__UpperCamelCase : str =[
f'##{token}' if idx != 0 else token for idx, token in enumerate(lowerCamelCase__ )
]
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase : Optional[int] =tokenizer.encode('你好' , add_special_tokens=lowerCamelCase__ )
__UpperCamelCase : Optional[int] =tokenizer.encode('你是谁' , add_special_tokens=lowerCamelCase__ )
__UpperCamelCase : Dict =tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ )
__UpperCamelCase : Tuple =tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ , lowerCamelCase__ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[str] =self.get_tokenizers(do_lower_case=lowerCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__UpperCamelCase : Dict ='你好,你是谁'
__UpperCamelCase : str =tokenizer.tokenize(lowerCamelCase__ )
__UpperCamelCase : Optional[Any] =tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
__UpperCamelCase : Optional[Any] =tokenizer.convert_tokens_to_shape_ids(lowerCamelCase__ )
__UpperCamelCase : Dict =tokenizer.convert_tokens_to_pronunciation_ids(lowerCamelCase__ )
__UpperCamelCase : Optional[Any] =tokenizer.prepare_for_model(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
__UpperCamelCase : Any =tokenizer.encode_plus(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
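# Sketch of the tri-channel encoding these tests exercise, using the toy vocab from
# setUp (token, shape and pronunciation IDs are parallel sequences):
#   tokenizer = RoCBertTokenizer(vocab_file, word_shape_file, word_pronunciation_file)
#   tokens = tokenizer.tokenize('你好')                    # -> ['你', '好']
#   tokenizer.convert_tokens_to_ids(tokens)                # -> [5, 6]
#   tokenizer.convert_tokens_to_shape_ids(tokens)          # -> [5, 6]
#   tokenizer.convert_tokens_to_pronunciation_ids(tokens)  # -> [5, 6]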
| 71 |
import math
import unittest
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time via 6k +/- 1 trial division."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes > 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
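# Why the loop steps by 6: once 2 and 3 are excluded, every prime is of the form
# 6k - 1 or 6k + 1, so trial division only needs i and i + 2 for i = 5, 11, 17, ...
# up to sqrt(number). For example, is_prime(97) only tests the divisors 5 and 7.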
class Test(unittest.TestCase):
    def test_primes(self) -> None:
        """simple docstring"""
self.assertTrue(is_prime(2))
self.assertTrue(is_prime(3))
self.assertTrue(is_prime(5))
self.assertTrue(is_prime(7))
self.assertTrue(is_prime(11))
self.assertTrue(is_prime(13))
self.assertTrue(is_prime(17))
self.assertTrue(is_prime(19))
self.assertTrue(is_prime(23))
self.assertTrue(is_prime(29))
    def test_not_primes(self) -> None:
        """simple docstring"""
        with self.assertRaises(AssertionError):
            is_prime(-19)
self.assertFalse(
is_prime(0) , 'Zero doesn\'t have any positive factors, primes must have exactly two.' , )
self.assertFalse(
is_prime(1) , 'One only has 1 positive factor, primes must have exactly two.' , )
self.assertFalse(is_prime(2 * 2))
self.assertFalse(is_prime(2 * 3))
self.assertFalse(is_prime(3 * 3))
self.assertFalse(is_prime(3 * 5))
self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
| 339 | 0 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith('encoder'):
        k = k.replace('.attn', '.self_attn')
        k = k.replace('norm1', 'self_attn_layer_norm')
        k = k.replace('norm2', 'final_layer_norm')
    elif k.startswith('decoder'):
        k = k.replace('norm1', 'self_attn_layer_norm')
        k = k.replace('norm2', 'encoder_attn_layer_norm')
        k = k.replace('norm3', 'final_layer_norm')
    return k
def rename_layernorm_keys(sd):
    keys = [
        'model.encoder.layernorm_embedding.weight',
        'model.encoder.layernorm_embedding.bias',
        'model.decoder.layernorm_embedding.weight',
        'model.decoder.layernorm_embedding.bias',
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace('layernorm_embedding', 'layer_norm')
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ['START']
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy, rename and tweak a ParlAI checkpoint into the HF Blenderbot layout."""
    model = torch.load(checkpoint_path, map_location='cpu')
    sd = model['model']
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
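# Example invocation (the script filename is a placeholder; flags mirror the
# argparse defaults above):
#   python convert_blenderbot_checkpoint.py --src_path blenderbot-model.bin \
#       --save_dir hf_blenderbot --hf_config_json blenderbot-3b-config.json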
| 354 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet'] = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet_fast'] = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_fnet'] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
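    # Usage note (sketch): with the lazy module installed, `from ..models.fnet
    # import FNetModel` resolves the symbol on first attribute access instead of
    # importing torch at package-import time.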
| 207 | 0 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    '''simple docstring'''
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
# add an entry for [MASK2]
UpperCamelCase__ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
UpperCamelCase__ = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
UpperCamelCase__ = AddedToken("<ent>" , lstrip=__A , rstrip=__A )
UpperCamelCase__ = AddedToken("<ent2>" , lstrip=__A , rstrip=__A )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(__A )
with open(os.path.join(__A , "tokenizer_config.json" ) , "r" ) as f:
UpperCamelCase__ = json.load(__A )
UpperCamelCase__ = "MLukeTokenizer"
with open(os.path.join(__A , "tokenizer_config.json" ) , "w" ) as f:
json.dump(__A , __A )
with open(os.path.join(__A , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(__A , __A )
UpperCamelCase__ = MLukeTokenizer.from_pretrained(__A )
# Initialize the embeddings of the special tokens
UpperCamelCase__ = tokenizer.convert_tokens_to_ids(["@"] )[0]
UpperCamelCase__ = tokenizer.convert_tokens_to_ids(["#"] )[0]
UpperCamelCase__ = state_dict["embeddings.word_embeddings.weight"]
UpperCamelCase__ = word_emb[ent_init_index].unsqueeze(0 )
UpperCamelCase__ = word_emb[enta_init_index].unsqueeze(0 )
UpperCamelCase__ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
UpperCamelCase__ = state_dict[bias_name]
UpperCamelCase__ = decoder_bias[ent_init_index].unsqueeze(0 )
UpperCamelCase__ = decoder_bias[enta_init_index].unsqueeze(0 )
UpperCamelCase__ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
UpperCamelCase__ = F'''encoder.layer.{layer_index}.attention.self.'''
UpperCamelCase__ = state_dict[prefix + matrix_name]
UpperCamelCase__ = state_dict[prefix + matrix_name]
UpperCamelCase__ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
UpperCamelCase__ = state_dict["entity_embeddings.entity_embeddings.weight"]
UpperCamelCase__ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
UpperCamelCase__ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
UpperCamelCase__ = state_dict["entity_predictions.bias"]
UpperCamelCase__ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
UpperCamelCase__ = torch.cat([entity_prediction_bias, entity_mask_bias] )
UpperCamelCase__ = LukeForMaskedLM(config=__A ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
UpperCamelCase__ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
UpperCamelCase__ = state_dict[key]
else:
UpperCamelCase__ = state_dict[key]
UpperCamelCase__ , UpperCamelCase__ = model.load_state_dict(__A , strict=__A )
if set(__A ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(__A ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
UpperCamelCase__ = MLukeTokenizer.from_pretrained(__A , task="entity_classification" )
UpperCamelCase__ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
UpperCamelCase__ = (0, 9)
UpperCamelCase__ = tokenizer(__A , entity_spans=[span] , return_tensors="pt" )
UpperCamelCase__ = model(**__A )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
UpperCamelCase__ = torch.Size((1, 33, 768) )
UpperCamelCase__ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __A , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
UpperCamelCase__ = torch.Size((1, 1, 768) )
UpperCamelCase__ = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __A , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
UpperCamelCase__ = MLukeTokenizer.from_pretrained(__A )
UpperCamelCase__ = "Tokyo is the capital of <mask>."
UpperCamelCase__ = (24, 30)
UpperCamelCase__ = tokenizer(__A , entity_spans=[span] , return_tensors="pt" )
UpperCamelCase__ = model(**__A )
UpperCamelCase__ = encoding["input_ids"][0].tolist()
UpperCamelCase__ = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
UpperCamelCase__ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__A )
UpperCamelCase__ = outputs.entity_logits[0][0].argmax().item()
UpperCamelCase__ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(__A ) )
model.save_pretrained(__A )
def load_original_entity_vocab(entity_vocab_path):
    '''simple docstring'''
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
a__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a__ : Any = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 80 |
'''simple docstring'''
from math import factorial, pi
def maclaurin_sin(theta, accuracy=30):
    """Maclaurin-series approximation of sin(theta) using `accuracy` terms."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi  # reduce theta into [-2*pi, 2*pi] so the series converges quickly
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy))


def maclaurin_cos(theta, accuracy=30):
    """Maclaurin-series approximation of cos(theta) using `accuracy` terms."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(1_0))
print(maclaurin_sin(-1_0))
print(maclaurin_sin(1_0, 1_5))
print(maclaurin_sin(-1_0, 1_5))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(1_0, 1_5))
print(maclaurin_cos(-1_0, 1_5))
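# Quick sanity check (illustrative tolerance): after range reduction, the truncated
# series should track math.sin / math.cos closely.
#   from math import sin, cos
#   assert abs(maclaurin_sin(10) - sin(10)) < 1e-7
#   assert abs(maclaurin_cos(10) - cos(10)) < 1e-7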
| 80 | 1 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
def __UpperCAmelCase ( self : Tuple) -> Dict:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
tokenizer.save_pretrained(self.tmpdirname)
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Optional[Any]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = "this is a test"
_UpperCamelCase = "this is a test"
return input_text, output_text
def __UpperCAmelCase ( self : List[str]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = "<pad>"
_UpperCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_) , lowercase_)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_) , lowercase_)
def __UpperCAmelCase ( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , "<pad>")
self.assertEqual(vocab_keys[1] , "<unk>")
self.assertEqual(vocab_keys[-1] , "▁eloquent")
self.assertEqual(len(lowercase_) , 30000)
def __UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 30000)
def __UpperCAmelCase ( self : str) -> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = "I was born in 92000, and this is falsé."
_UpperCamelCase = tokenizer.tokenize(lowercase_)
_UpperCamelCase = rust_tokenizer.tokenize(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
_UpperCamelCase = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
_UpperCamelCase = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = tokenizer.encode(lowercase_)
_UpperCamelCase = rust_tokenizer.encode(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
def __UpperCAmelCase ( self : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = AlbertTokenizer(lowercase_ , keep_accents=lowercase_)
_UpperCamelCase = tokenizer.tokenize("This is a test")
self.assertListEqual(lowercase_ , ["▁this", "▁is", "▁a", "▁test"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_) , [48, 25, 21, 1289])
_UpperCamelCase = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
lowercase_ , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."])
_UpperCamelCase = tokenizer.convert_tokens_to_ids(lowercase_)
self.assertListEqual(lowercase_ , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])
_UpperCamelCase = tokenizer.convert_ids_to_tokens(lowercase_)
self.assertListEqual(
lowercase_ , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."] , )
def __UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = AlbertTokenizer(lowercase_)
_UpperCamelCase = tokenizer.encode("sequence builders")
_UpperCamelCase = tokenizer.encode("multi-sequence build")
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(lowercase_)
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(lowercase_ , lowercase_)
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def __UpperCAmelCase ( self : int) -> List[str]:
"""simple docstring"""
_UpperCamelCase = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name="albert-base-v2" , revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e" , )
| 63 | import requests
from bsa import BeautifulSoup
def lowerCAmelCase__ ( a__ = "https://www.worldometers.info/coronavirus" ) ->dict:
'''simple docstring'''
_UpperCamelCase = BeautifulSoup(requests.get(a__ ).text , "html.parser" )
_UpperCamelCase = soup.findAll("h1" )
_UpperCamelCase = soup.findAll("div" , {"class": "maincounter-number"} )
keys += soup.findAll("span" , {"class": "panel-title"} )
values += soup.findAll("div" , {"class": "number-table-main"} )
return {key.text.strip(): value.text.strip() for key, value in zip(a__ , a__ )}
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
for key, value in world_covidaa_stats().items():
print(F"{key}\n{value}\n")
| 63 | 1 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class __lowerCAmelCase ( unittest.TestCase):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=1_8 , lowerCAmelCase__=3_0 , lowerCAmelCase__=4_0_0 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , ) -> List[Any]:
'''simple docstring'''
a__ : int =size if size is not None else {"height": 1_8, "width": 1_8}
a__ : Dict =parent
a__ : Union[str, Any] =batch_size
a__ : List[Any] =num_channels
a__ : str =image_size
a__ : Any =min_resolution
a__ : Dict =max_resolution
a__ : Optional[int] =do_resize
a__ : List[str] =size
a__ : Union[str, Any] =do_normalize
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
[-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class __lowerCAmelCase ( UpperCamelCase__ , unittest.TestCase):
_lowercase : Dict = ImageGPTImageProcessor if is_vision_available() else None
def _lowercase ( self ) -> str:
'''simple docstring'''
a__ : Tuple =ImageGPTImageProcessingTester(self )
@property
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self ) -> Dict:
'''simple docstring'''
a__ : Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "clusters" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) )
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : Optional[int] =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 1_8, "width": 1_8} )
a__ : Optional[Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {"height": 4_2, "width": 4_2} )
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ : Dict =self.image_processing_class(**self.image_processor_dict )
a__ : Optional[Any] =json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase__ , obj[key] ) )
else:
self.assertEqual(obj[key] , lowerCAmelCase__ )
def _lowercase ( self ) -> str:
'''simple docstring'''
a__ : Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Tuple =os.path.join(lowerCAmelCase__ , "image_processor.json" )
image_processor_first.to_json_file(lowerCAmelCase__ )
a__ : List[Any] =self.image_processing_class.from_json_file(lowerCAmelCase__ ).to_dict()
a__ : Tuple =image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase__ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCAmelCase__ )
def _lowercase ( self ) -> str:
'''simple docstring'''
a__ : Optional[int] =self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowerCAmelCase__ )
a__ : List[Any] =self.image_processing_class.from_pretrained(lowerCAmelCase__ ).to_dict()
a__ : List[Any] =image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase__ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCAmelCase__ )
@unittest.skip("ImageGPT requires clusters at initialization" )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def _A ( ):
"""simple docstring"""
a__ : Optional[int] =load_dataset("hf-internal-testing/fixtures_image_utils" , split="test" )
a__ : Union[str, Any] =Image.open(dataset[4]["file"] )
a__ : Any =Image.open(dataset[5]["file"] )
a__ : str =[imagea, imagea]
return images
@require_vision
@require_torch
class __lowerCAmelCase ( unittest.TestCase):
@slow
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
a__ : Tuple =ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small" )
a__ : List[Any] =prepare_images()
# test non-batched
a__ : List[str] =image_processing(images[0] , return_tensors="pt" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4) )
a__ : Any =[3_0_6, 1_9_1, 1_9_1]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowerCAmelCase__ )
# test batched
a__ : Optional[Any] =image_processing(lowerCAmelCase__ , return_tensors="pt" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4) )
a__ : Tuple =[3_0_3, 1_3, 1_3]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowerCAmelCase__ )
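# What the integration test checks: ImageGPTImageProcessor quantises each pixel to
# its nearest colour cluster, so `input_ids` is a sequence of 32 * 32 = 1024 cluster
# indices per image (hence the (batch, 1024) shapes asserted above), not a float
# pixel tensor.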
| 95 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})")
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})")
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'" )
| 11 | 0 |
'''simple docstring'''
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
"""simple docstring"""
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif stress < 0:
raise ValueError("Stress cannot be negative" )
elif tangential_force < 0:
raise ValueError("Tangential Force cannot be negative" )
elif area < 0:
raise ValueError("Area cannot be negative" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
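    # Worked examples (made-up numbers): exactly one argument is zero and is
    # solved for from the other two.
    #   shear_stress(stress=0, tangential_force=25, area=5)    # -> ('stress', 5.0)
    #   shear_stress(stress=25, tangential_force=100, area=0)  # -> ('area', 4.0)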
| 362 |
'''simple docstring'''
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle row by row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(triangle: list[list[int]], current_row: list[int], current_row_idx: int, current_col_idx: int) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Faster variant that only computes the distinct half of each row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Benchmark the two triangle generators against each other."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 142 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    """Configuration class to store the configuration of a BEiT model."""

    model_type = "beit"

    def __init__(self, vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 35 |
'''simple docstring'''
def sum_of_series(first_term: float, common_diff: float, num_of_terms: float) -> float:
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
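    # Worked example matching main(): first_term=1, common_diff=1, num_of_terms=10
    # gives (10 / 2) * (2 * 1 + 9 * 1) = 5 * 11 = 55.0.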
| 35 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""facebook/data2vec-vision-base-ft""": (
"""https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
),
}
class Data2VecVisionConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Data2VecVision model."""

    model_type = "data2vec-vision"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 361 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a_ = logging.get_logger(__name__)
a_ = {
"""shi-labs/dinat-mini-in1k-224""": """https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json""",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class to store the configuration of a Dinat model."""

    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]], mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-5, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
| 291 | 0 |
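The `hidden_size` computed at the end of the constructor follows the usual hierarchical-backbone rule: the channel width doubles at every stage, so the last stage carries `embed_dim * 2 ** (num_stages - 1)` channels. A minimal check with the defaults above:

```python
# Per-stage channel widths for the default config (embed_dim=64, four stages).
embed_dim, depths = 64, [3, 4, 6, 5]
widths = [embed_dim * 2**i for i in range(len(depths))]
print(widths)      # [64, 128, 256, 512]
print(widths[-1])  # 512 == int(64 * 2 ** 3), the hidden_size set in __init__
```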
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCamelCase__ :
def __init__(self : Union[str, Any] , snake_case_ : int , snake_case_ : int=1_3 , snake_case_ : Dict=7 , snake_case_ : List[str]=True , snake_case_ : Dict=True , snake_case_ : Union[str, Any]=True , snake_case_ : List[Any]=True , snake_case_ : Tuple=9_9 , snake_case_ : Tuple=3_2 , snake_case_ : List[Any]=2 , snake_case_ : Tuple=4 , snake_case_ : str=3_7 , snake_case_ : Union[str, Any]="gelu" , snake_case_ : List[Any]=0.1 , snake_case_ : str=0.1 , snake_case_ : int=5_1_2 , snake_case_ : List[Any]=1_6 , snake_case_ : Tuple=2 , snake_case_ : Optional[Any]=0.02 , snake_case_ : Optional[Any]=3 , snake_case_ : Optional[int]=4 , snake_case_ : int=None , ):
__a : Optional[int] = parent
__a : int = 1_3
__a : str = 7
__a : Union[str, Any] = True
__a : int = True
__a : Tuple = True
__a : Tuple = True
__a : List[Any] = 9_9
__a : Optional[int] = 3_2
__a : List[Any] = 2
__a : Optional[Any] = 4
__a : Tuple = 3_7
__a : Dict = '''gelu'''
__a : int = 0.1
__a : List[Any] = 0.1
__a : Union[str, Any] = 5_1_2
__a : Tuple = 1_6
__a : Optional[Any] = 2
__a : Dict = 0.02
__a : List[Any] = 3
__a : Any = 4
__a : List[str] = None
def lowerCAmelCase (self : List[Any] ):
__a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a : List[str] = None
if self.use_input_mask:
__a : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__a : Optional[int] = None
if self.use_token_type_ids:
__a : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a : List[str] = None
__a : List[Any] = None
__a : Optional[Any] = None
if self.use_labels:
__a : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
__a : Tuple = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=snake_case_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase (self : Dict , snake_case_ : Tuple , snake_case_ : int , snake_case_ : List[Any] , snake_case_ : Any , snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] ):
__a : Union[str, Any] = TFRoFormerModel(config=snake_case_ )
__a : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__a : List[Any] = [input_ids, input_mask]
__a : str = model(snake_case_ )
__a : Any = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase (self : Any , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : int , snake_case_ : List[str] , snake_case_ : int , snake_case_ : Optional[int] , snake_case_ : Dict ):
__a : Union[str, Any] = True
__a : Dict = TFRoFormerForCausalLM(config=snake_case_ )
__a : Optional[Any] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__a : Union[str, Any] = model(snake_case_ )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def lowerCAmelCase (self : Any , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : int , snake_case_ : List[Any] , snake_case_ : Dict , snake_case_ : int , snake_case_ : Tuple ):
__a : Optional[int] = TFRoFormerForMaskedLM(config=snake_case_ )
__a : List[str] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__a : Optional[Any] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase (self : Dict , snake_case_ : str , snake_case_ : Tuple , snake_case_ : int , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] ):
__a : List[Any] = self.num_labels
__a : Optional[Any] = TFRoFormerForSequenceClassification(config=snake_case_ )
__a : Any = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__a : List[Any] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase (self : int , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[int] , snake_case_ : List[Any] , snake_case_ : List[str] , snake_case_ : Optional[Any] ):
__a : List[Any] = self.num_choices
__a : Any = TFRoFormerForMultipleChoice(config=snake_case_ )
__a : Any = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
__a : Tuple = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
__a : int = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
__a : Dict = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__a : Any = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase (self : Any , snake_case_ : int , snake_case_ : List[Any] , snake_case_ : List[str] , snake_case_ : Tuple , snake_case_ : List[str] , snake_case_ : Union[str, Any] , snake_case_ : int ):
__a : Any = self.num_labels
__a : str = TFRoFormerForTokenClassification(config=snake_case_ )
__a : Any = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__a : Optional[Any] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase (self : Dict , snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : str , snake_case_ : Tuple , snake_case_ : List[Any] ):
__a : Optional[int] = TFRoFormerForQuestionAnswering(config=snake_case_ )
__a : int = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__a : List[Any] = model(snake_case_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase (self : int ):
__a : List[str] = self.prepare_config_and_inputs()
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) : List[Any] = config_and_inputs
__a : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class UpperCamelCase__ ( __lowercase ,__lowercase ,unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
_SCREAMING_SNAKE_CASE : int = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
_SCREAMING_SNAKE_CASE : Any = False
_SCREAMING_SNAKE_CASE : Optional[int] = False
def lowerCAmelCase (self : List[str] , snake_case_ : Any , snake_case_ : Any , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : Optional[int] ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def lowerCAmelCase (self : Any ):
        self.model_tester = TFRoFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RoFormerConfig , hidden_size=3_7 )
def lowerCAmelCase (self : Optional[int] ):
self.config_tester.run_common_tests()
def lowerCAmelCase (self : str ):
__a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowerCAmelCase (self : Tuple ):
__a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def lowerCAmelCase (self : Union[str, Any] ):
__a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*snake_case_ )
def lowerCAmelCase (self : Dict ):
__a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case_ )
def lowerCAmelCase (self : str ):
__a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case_ )
def lowerCAmelCase (self : Dict ):
__a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case_ )
def lowerCAmelCase (self : Tuple ):
__a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
@slow
def lowerCAmelCase (self : Tuple ):
        model = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
        self.assertIsNotNone(model )
@require_tf
class UpperCamelCase__ ( unittest.TestCase ):
@slow
def lowerCAmelCase (self : Union[str, Any] ):
        model = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
# TODO Replace vocab size
        vocab_size = 5_0_0_0_0
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape , expected_shape )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
[
[
[-0.1205_3341, -1.026_4901, 0.2922_1946],
[-1.513_3783, 0.19_7433, 0.1519_0607],
[-5.013_5403, -3.90_0256, -0.8403_8764],
]
] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
@require_tf
class UpperCamelCase__ ( unittest.TestCase ):
    tolerance = 1e-4
def lowerCAmelCase (self : Optional[Any] ):
        input_ids = tf.constant([[4, 1_0]] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        emb = emba(input_ids.shape )
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
        tf.debugging.assert_near(emb , desired_weights , atol=self.tolerance )
def lowerCAmelCase (self : List[str] ):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_1_2 , embedding_dim=5_1_2 )
        emba([2, 1_6, 5_1_2] )
        weights = emba.weight[:3, :5]
        tf.debugging.assert_near(weights , desired_weights , atol=self.tolerance )
@require_tf
class UpperCamelCase__ ( unittest.TestCase ):
    tolerance = 1e-4
def lowerCAmelCase (self : str ):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.float32 ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
        key_layer = -tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.float32 ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=3_2 , embedding_dim=6_4 )
        sinusoidal_pos = embed_positions([2, 1_6, 7_6_8] )[None, None, :, :]
        query_layer , key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos , query_layer , key_layer )
        expected_query = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
        expected_key = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , expected_query , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , expected_key , atol=self.tolerance )
| 216 |
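The rotary test above exercises `apply_rotary_position_embeddings`, which rotates each (even, odd) pair of query/key features by a position-dependent angle so attention scores depend only on relative positions. A minimal NumPy sketch of the same idea; the helper names are hypothetical and the 10000 base is the standard sinusoidal choice:

```python
import numpy as np

def rotate_half(x):
    # Each (x1, x2) feature pair becomes (-x2, x1), a 90-degree rotation of the pair.
    x1, x2 = x[..., 0::2], x[..., 1::2]
    return np.stack([-x2, x1], axis=-1).reshape(x.shape)

def apply_rotary(x, positions):
    dim = x.shape[-1]
    inv_freq = 1.0 / 10000 ** (np.arange(0, dim, 2) / dim)
    angles = np.outer(positions, inv_freq)       # (seq_len, dim // 2)
    sin = np.repeat(np.sin(angles), 2, axis=-1)  # same angle for both pair members
    cos = np.repeat(np.cos(angles), 2, axis=-1)
    return x * cos + rotate_half(x) * sin

q = np.random.randn(6, 8)  # (seq_len, head_dim)
q_rot = apply_rotary(q, np.arange(6))
# Rotations preserve the norm of every feature pair, hence of the whole vector.
print(np.allclose(np.linalg.norm(q_rot, axis=-1), np.linalg.norm(q, axis=-1)))  # True
```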
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class UpperCamelCase__ :
def __init__(self : List[Any] , snake_case_ : int , snake_case_ : List[str]=1_3 , snake_case_ : Tuple=7 , snake_case_ : List[Any]=True , snake_case_ : List[Any]=True , snake_case_ : Dict=True , snake_case_ : Optional[int]=True , snake_case_ : str=9_9 , snake_case_ : Dict=6_4 , snake_case_ : Any=3_2 , snake_case_ : str=5 , snake_case_ : int=4 , snake_case_ : List[Any]=3_7 , snake_case_ : Any="gelu" , snake_case_ : Dict=0.1 , snake_case_ : List[str]=0.1 , snake_case_ : str=5_1_2 , snake_case_ : Any=1_6 , snake_case_ : str=2 , snake_case_ : int=0.02 , snake_case_ : Union[str, Any]=3 , snake_case_ : Optional[Any]=4 , snake_case_ : List[Any]=None , ):
__a : Any = parent
__a : Optional[int] = batch_size
__a : Any = seq_length
__a : int = is_training
__a : Optional[int] = use_input_mask
__a : List[Any] = use_token_type_ids
__a : Dict = use_labels
__a : Tuple = vocab_size
__a : str = hidden_size
__a : List[Any] = embedding_size
__a : List[Any] = num_hidden_layers
__a : str = num_attention_heads
__a : str = intermediate_size
__a : Union[str, Any] = hidden_act
__a : Optional[Any] = hidden_dropout_prob
__a : Tuple = attention_probs_dropout_prob
__a : Union[str, Any] = max_position_embeddings
__a : Any = type_vocab_size
__a : int = type_sequence_label_size
__a : int = initializer_range
__a : int = num_labels
__a : Union[str, Any] = num_choices
__a : Dict = scope
def lowerCAmelCase (self : str ):
__a : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a : List[Any] = None
if self.use_input_mask:
__a : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__a : Optional[Any] = None
if self.use_token_type_ids:
__a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a : Dict = None
__a : List[str] = None
__a : Optional[Any] = None
if self.use_labels:
__a : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
__a : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase (self : int ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
def lowerCAmelCase (self : str , snake_case_ : Tuple , snake_case_ : List[str] , snake_case_ : int , snake_case_ : int , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : Any ):
__a : Any = MobileBertModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__a : List[str] = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )
__a : Optional[Any] = model(snake_case_ , token_type_ids=snake_case_ )
__a : Optional[Any] = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase (self : Any , snake_case_ : Dict , snake_case_ : Optional[Any] , snake_case_ : Tuple , snake_case_ : Dict , snake_case_ : Tuple , snake_case_ : str , snake_case_ : List[Any] ):
__a : str = MobileBertForMaskedLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__a : Tuple = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase (self : Tuple , snake_case_ : Any , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : Any , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : Dict ):
__a : Optional[Any] = MobileBertForNextSentencePrediction(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__a : int = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCAmelCase (self : Any , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Any , snake_case_ : Union[str, Any] , snake_case_ : Dict , snake_case_ : Optional[Any] ):
__a : str = MobileBertForPreTraining(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__a : Union[str, Any] = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , next_sentence_label=snake_case_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCAmelCase (self : Dict , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : Dict , snake_case_ : int , snake_case_ : int , snake_case_ : str , snake_case_ : str ):
__a : str = MobileBertForQuestionAnswering(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__a : Optional[Any] = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase (self : Optional[int] , snake_case_ : Any , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : str , snake_case_ : Any , snake_case_ : Tuple , snake_case_ : Optional[int] ):
__a : Any = self.num_labels
__a : Union[str, Any] = MobileBertForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
__a : Tuple = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase (self : List[Any] , snake_case_ : Optional[int] , snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : Dict , snake_case_ : Optional[int] ):
__a : Union[str, Any] = self.num_labels
__a : str = MobileBertForTokenClassification(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__a : Any = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase (self : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Any , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : Any , snake_case_ : Dict , snake_case_ : Union[str, Any] ):
__a : Union[str, Any] = self.num_choices
__a : List[str] = MobileBertForMultipleChoice(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__a : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : Any = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase (self : Optional[Any] ):
__a : Optional[Any] = self.prepare_config_and_inputs()
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) : int = config_and_inputs
__a : Union[str, Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( __lowercase ,__lowercase ,unittest.TestCase ):
_SCREAMING_SNAKE_CASE : str = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE : Any = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
def lowerCAmelCase (self : str , snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : Union[str, Any]=False ):
__a : List[str] = super()._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ )
if return_labels:
if model_class in get_values(snake_case_ ):
__a : Optional[int] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case_ )
__a : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case_ )
return inputs_dict
def lowerCAmelCase (self : Tuple ):
        self.model_tester = MobileBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=3_7 )
def lowerCAmelCase (self : Union[str, Any] ):
self.config_tester.run_common_tests()
def lowerCAmelCase (self : Optional[Any] ):
__a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*snake_case_ )
def lowerCAmelCase (self : str ):
__a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case_ )
def lowerCAmelCase (self : Tuple ):
__a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case_ )
def lowerCAmelCase (self : Dict ):
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case_ )
def lowerCAmelCase (self : int ):
__a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case_ )
def lowerCAmelCase (self : List[Any] ):
__a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case_ )
def lowerCAmelCase (self : int ):
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case_ )
def lowerCAmelCase (self : Tuple ):
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case_ )
def _long_tensor( tok_lst ):
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( unittest.TestCase ):
@slow
def lowerCAmelCase (self : Any ):
        model = MobileBertModel.from_pretrained('''google/mobilebert-uncased''' ).to(torch_device )
        input_ids = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 5_1_2) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [
                    [-2.473_6526E07, 8.269_1656E04, 1.652_1838E05],
                    [-5.754_1704E-01, 3.905_6022E00, 4.401_1507E00],
                    [2.604_7359E00, 1.567_7652E00, -1.732_4188E-01],
                ]
            ] , device=torch_device , )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
        self.assertTrue(lower_bound and upper_bound )
| 216 | 1 |
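The final assertion above bounds the ratio `expected / output` near 1 instead of bounding the absolute difference, because MobileBERT activations span roughly 1e0 to 1e8. A minimal standalone sketch of that relative-tolerance check:

```python
import torch

def assert_close_by_ratio(expected: torch.Tensor, actual: torch.Tensor, tolerance: float = 1e-3) -> None:
    # A tiny relative error on a 1e8-scale entry is still a difference of ~1,
    # so bound expected/actual in [1 - tol, 1 + tol] instead of |expected - actual|.
    ratio = expected / actual
    assert torch.all(ratio >= 1 - tolerance) and torch.all(ratio <= 1 + tolerance)

assert_close_by_ratio(torch.tensor([1e8, 2.0]), torch.tensor([1.0001e8, 2.0005]))  # passes
```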
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def list_datasets_on_hf_gcp_parameters(with_config=True ):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True ) )
class TestDatasetOnHfGcp( TestCase ):
    dataset = None
    config_name = None
    def test_dataset_info_available( self , dataset , config_name ):
        """simple docstring"""
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset , cache_dir=tmp_dir )
            builder_cls = import_main_class(dataset_module.module_path , dataset=True )
            builder_instance = builder_cls(
                cache_dir=tmp_dir , config_name=config_name , hash=dataset_module.hash , )
            dataset_info_url = '/'.join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False ).replace(os.sep , '/' ),
                    config.DATASET_INFO_FILENAME,
                ] )
            dataset_info_path = cached_path(dataset_info_url , cache_dir=tmp_dir )
            self.assertTrue(os.path.exists(dataset_info_path ) )
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory ):
    tmp_dir = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
    dataset_module = dataset_module_factory('wikipedia' , cache_dir=tmp_dir )
    builder_cls = import_main_class(dataset_module.module_path )
    builder_instance = builder_cls(
        cache_dir=tmp_dir , config_name='20220301.frr' , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset(tmp_path ):
    dataset_module = dataset_module_factory('wikipedia' , cache_dir=tmp_path )
    builder_cls = import_main_class(dataset_module.module_path , dataset=True )
    builder_instance = builder_cls(
        cache_dir=tmp_path , config_name='20220301.frr' , hash=dataset_module.hash , )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds , IterableDatasetDict )
    assert "train" in ds
    assert isinstance(ds['train'] , IterableDataset )
    assert next(iter(ds['train'] ) )
| 309 |
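`list_datasets_on_hf_gcp_parameters` expands each dataset/config pair into one named test case for absl's `parameterized.named_parameters`. A minimal sketch of the same expansion pattern outside the datasets test suite; the class and assertion are illustrative only:

```python
import unittest
from absl.testing import parameterized

class DatasetPathTest(parameterized.TestCase):
    @parameterized.named_parameters(
        {"testcase_name": "wikipedia/20220301.en", "dataset": "wikipedia", "config_name": "20220301.en"},
        {"testcase_name": "snli/plain_text", "dataset": "snli", "config_name": "plain_text"},
    )
    def test_relative_dir(self, dataset, config_name):
        # Each dict becomes its own test method, named after testcase_name.
        self.assertEqual(f"{dataset}/{config_name}".count("/"), 1)

if __name__ == "__main__":
    unittest.main()
```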
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
'''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
'''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutlmv2_fast'''] = ['''LayoutLMv2TokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_layoutlmv2'''] = ['''LayoutLMv2FeatureExtractor''']
    _import_structure['''image_processing_layoutlmv2'''] = ['''LayoutLMv2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_layoutlmv2'''] = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 309 | 1 |
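`_LazyModule` defers the submodule imports declared in `_import_structure` until an attribute is first accessed. A minimal sketch of the same deferred-import idea using PEP 562's module-level `__getattr__`, meant to live in a package's `__init__.py`; the submodule and class names are hypothetical:

```python
import importlib

# Map each public name to the submodule that defines it.
_IMPORT_STRUCTURE = {
    "configuration_example": ["ExampleConfig"],
    "modeling_example": ["ExampleModel"],
}
_NAME_TO_MODULE = {name: mod for mod, names in _IMPORT_STRUCTURE.items() for name in names}

def __getattr__(name):
    module_name = _NAME_TO_MODULE.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    value = getattr(importlib.import_module(f".{module_name}", __name__), name)
    globals()[name] = value  # cache, so __getattr__ only fires once per name
    return value
```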
import socket
def main() -> None:
    """Connect to the server and receive a file over TCP."""
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 12312
    sock.connect((host, port) )
    sock.send(b"""Hello server!""" )
    with open("""Received_file""" , """wb""" ) as out_file:
        print("""File opened""" )
        print("""Receiving data...""" )
        while True:
            data = sock.recv(1024 )
            if not data:
                break
            out_file.write(data )
    print("""Successfully received the file""" )
    sock.close()
    print("""Connection closed""" )
if __name__ == "__main__":
main()
| 38 |
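The client above expects a peer that accepts the connection, reads the greeting, and streams a file back, closing the socket to signal EOF. A minimal matching server sketch; the filename and the 1024-byte chunk size mirror the client and are illustrative:

```python
import socket

def serve_file(filename: str = "File_to_send", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, addr = server.accept()
    print(f"Connected to {addr}")
    print(conn.recv(1024))  # the client's "Hello server!" greeting
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.sendall(chunk)
    conn.close()  # closing ends the client's recv loop
    server.close()

if __name__ == "__main__":
    serve_file()
```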
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool( PipelineTool ):
    default_checkpoint = """openai/whisper-base"""
    description = (
        """This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
        """transcribed text."""
    )
    name = """transcriber"""
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["""audio"""]
    outputs = ["""text"""]
    def encode( self , audio ):
        return self.pre_processor(audio , return_tensors="""pt""" ).input_features
    def forward( self , inputs ):
        return self.model.generate(inputs=inputs )
    def decode( self , outputs ):
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0]
| 38 | 1 |
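Outside the agents/tools framework, the same checkpoint can be driven directly through the `transformers` pipeline API, which wires the processor and the conditional-generation model together. A minimal usage sketch; the audio path is a placeholder:

```python
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="openai/whisper-base")
result = asr("sample.flac")  # path to a local audio file (placeholder)
print(result["text"])
```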
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class LiltConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = '''lilt'''
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , channel_shrink_ratio=4 , max_2d_position_embeddings=1_024 , **kwargs ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 351 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput( BaseOutput ):
    """simple docstring"""
    latent_dist: "DiagonalGaussianDistribution"
class AutoencoderKL( ModelMixin , ConfigMixin ):
    """simple docstring"""
    _supports_gradient_checkpointing = True
    @register_to_config
    def __init__( self , in_channels = 3 , out_channels = 3 , down_block_types = ("DownEncoderBlock2D",) , up_block_types = ("UpDecoderBlock2D",) , block_out_channels = (64,) , layers_per_block = 1 , act_fn = "silu" , latent_channels = 4 , norm_num_groups = 32 , sample_size = 32 , scaling_factor = 0.1_82_15 , ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=True , )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , norm_num_groups=norm_num_groups , act_fn=act_fn , )
        self.quant_conv = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
        self.post_quant_conv = nn.Conv2d(latent_channels , latent_channels , 1 )
        self.use_slicing = False
        self.use_tiling = False
        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size , (list, tuple) )
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
        self.tile_overlap_factor = 0.25
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> int:
if isinstance(_SCREAMING_SNAKE_CASE ,(Encoder, Decoder) ):
UpperCAmelCase_ : Union[str, Any] = value
def a__ ( self ,_SCREAMING_SNAKE_CASE = True ) -> Optional[Any]:
UpperCAmelCase_ : Dict = use_tiling
def a__ ( self ) -> Optional[Any]:
self.enable_tiling(_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : str = True
def a__ ( self ) -> Any:
UpperCAmelCase_ : Any = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def a__ ( self ) -> Dict[str, AttentionProcessor]:
UpperCAmelCase_ : int = {}
def fn_recursive_add_processors(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
if hasattr(_SCREAMING_SNAKE_CASE ,'''set_processor''' ):
UpperCAmelCase_ : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
return processors
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = len(self.attn_processors.keys() )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) and len(_SCREAMING_SNAKE_CASE ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(_SCREAMING_SNAKE_CASE )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
if hasattr(_SCREAMING_SNAKE_CASE ,'''set_processor''' ):
if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
module.set_processor(_SCREAMING_SNAKE_CASE )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
for name, module in self.named_children():
fn_recursive_attn_processor(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Tuple:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_SCREAMING_SNAKE_CASE ,return_dict=_SCREAMING_SNAKE_CASE )
if self.use_slicing and x.shape[0] > 1:
UpperCAmelCase_ : int = [self.encoder(_SCREAMING_SNAKE_CASE ) for x_slice in x.split(1 )]
UpperCAmelCase_ : List[str] = torch.cat(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : Optional[Any] = self.encoder(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = self.quant_conv(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = DiagonalGaussianDistribution(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_SCREAMING_SNAKE_CASE ,return_dict=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = self.post_quant_conv(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = self.decoder(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_SCREAMING_SNAKE_CASE )
@apply_forward_hook
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
UpperCAmelCase_ : Tuple = [self._decode(_SCREAMING_SNAKE_CASE ).sample for z_slice in z.split(1 )]
UpperCAmelCase_ : str = torch.cat(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : str = self._decode(_SCREAMING_SNAKE_CASE ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
UpperCAmelCase_ : str = min(a.shape[2] ,b.shape[2] ,_SCREAMING_SNAKE_CASE )
for y in range(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : str = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ : Any = min(a.shape[3] ,b.shape[3] ,_SCREAMING_SNAKE_CASE )
for x in range(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : List[str] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = True ) -> AutoencoderKLOutput:
UpperCAmelCase_ : Optional[int] = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Any = int(self.tile_latent_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : str = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
UpperCAmelCase_ : Tuple = []
for i in range(0 ,x.shape[2] ,_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : List[str] = []
for j in range(0 ,x.shape[3] ,_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : List[str] = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
UpperCAmelCase_ : int = self.encoder(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = self.quant_conv(_SCREAMING_SNAKE_CASE )
row.append(_SCREAMING_SNAKE_CASE )
rows.append(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = []
for i, row in enumerate(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : str = []
for j, tile in enumerate(_SCREAMING_SNAKE_CASE ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Optional[Any] = self.blend_v(rows[i - 1][j] ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
if j > 0:
UpperCAmelCase_ : List[str] = self.blend_h(row[j - 1] ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_SCREAMING_SNAKE_CASE ,dim=3 ) )
UpperCAmelCase_ : Optional[Any] = torch.cat(_SCREAMING_SNAKE_CASE ,dim=2 )
UpperCAmelCase_ : Union[str, Any] = DiagonalGaussianDistribution(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = True ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : Optional[Any] = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : str = int(self.tile_sample_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Any = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
UpperCAmelCase_ : Tuple = []
for i in range(0 ,z.shape[2] ,_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Tuple = []
for j in range(0 ,z.shape[3] ,_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Optional[int] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
UpperCAmelCase_ : List[str] = self.post_quant_conv(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = self.decoder(_SCREAMING_SNAKE_CASE )
row.append(_SCREAMING_SNAKE_CASE )
rows.append(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = []
for i, row in enumerate(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : int = []
for j, tile in enumerate(_SCREAMING_SNAKE_CASE ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Optional[int] = self.blend_v(rows[i - 1][j] ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
if j > 0:
UpperCAmelCase_ : Optional[int] = self.blend_h(row[j - 1] ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_SCREAMING_SNAKE_CASE ,dim=3 ) )
UpperCAmelCase_ : Dict = torch.cat(_SCREAMING_SNAKE_CASE ,dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = None ,) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : str = sample
UpperCAmelCase_ : Optional[Any] = self.encode(_SCREAMING_SNAKE_CASE ).latent_dist
if sample_posterior:
UpperCAmelCase_ : List[Any] = posterior.sample(generator=_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : Union[str, Any] = posterior.mode()
UpperCAmelCase_ : List[Any] = self.decode(_SCREAMING_SNAKE_CASE ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_SCREAMING_SNAKE_CASE ) | 235 | 0 |
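`blend_v` and `blend_h` above crossfade the overlapping rows or columns of adjacent tiles with linearly ramped weights, so tiled encoding and decoding show no visible seams. A standalone NumPy sketch of the horizontal case:

```python
import numpy as np

def blend_h(left: np.ndarray, right: np.ndarray, blend_extent: int) -> np.ndarray:
    # Linearly crossfade the last `blend_extent` columns of `left`
    # into the first `blend_extent` columns of `right`.
    blend_extent = min(left.shape[-1], right.shape[-1], blend_extent)
    out = right.copy()
    for x in range(blend_extent):
        w = x / blend_extent  # weight on `right` ramps from 0 up toward 1
        out[..., x] = left[..., -blend_extent + x] * (1 - w) + right[..., x] * w
    return out

left = np.ones((1, 3, 8, 8))
right = np.zeros((1, 3, 8, 8))
print(blend_h(left, right, 4)[0, 0, 0, :4])  # [1.0, 0.75, 0.5, 0.25], a smooth ramp
```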
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
__magic_name__: int = logging.get_logger(__name__)
__magic_name__: Union[str, Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__magic_name__: int = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
__magic_name__: str = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
__magic_name__: List[Any] = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = DistilBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 342 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 342 | 1 |
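The layout the fast tokenizer produces is `[CLS] A [SEP]` for a single sequence and `[CLS] A [SEP] B [SEP]` for a pair; DistilBERT has no segment embeddings, which is why the class lists only `input_ids` and `attention_mask` as model inputs. A quick usage sketch, assuming the checkpoint is available:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("distilbert-base-uncased")
enc = tok("hello world", "nice day")
print(tok.convert_ids_to_tokens(enc["input_ids"]))
# ['[CLS]', 'hello', 'world', '[SEP]', 'nice', 'day', '[SEP]']
print("token_type_ids" in enc)  # False: DistilBERT ignores segment ids
```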
"""simple docstring"""
from collections.abc import Callable
def bisection(function: Callable[[float], float] , a: float , b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('''could not find root in given interval.''')
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod() | 188 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_instructblip'''] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 188 | 1 |
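A hypothetical interactive session showing what the `_LazyModule` pattern above buys: the heavy torch-dependent submodule is only imported when one of its attributes is first touched.

import transformers.models.instructblip as instructblip  # cheap: nothing heavy is imported yet

processor_cls = instructblip.InstructBlipProcessor  # __getattr__ triggers the real import here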
'''simple docstring'''
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in the 1000-digit number N that have
    the greatest product, and return that product."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
| 3 |
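The same sliding-window idea on a smaller scale, as a hypothetical helper with a configurable window size:

def largest_product_of_k(digits: str, k: int) -> int:
    best = 0
    for i in range(len(digits) - k + 1):
        product = 1
        for ch in digits[i : i + k]:
            product *= int(ch)
        best = max(best, product)
    return best

assert largest_product_of_k("3675356291", 4) == 630  # the windows "3675" and "6753" both give 630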
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = tempfile.mkdtemp()
lowerCamelCase_ : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
lowerCamelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCamelCase_ : Tuple = {
'''do_resize''': True,
'''size''': {'''height''': 2_2_4, '''width''': 2_2_4},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 1_8, '''width''': 1_8},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
lowerCamelCase_ : Tuple = os.path.join(self.tmpdirname , A )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(A , A )
def UpperCAmelCase__ (self , **A ):
return BertTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , **A ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , **A ):
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
lowerCamelCase_ : Optional[Any] = [Image.fromarray(np.moveaxis(A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = self.get_tokenizer()
lowerCamelCase_ : List[Any] = self.get_rust_tokenizer()
lowerCamelCase_ : List[Any] = self.get_image_processor()
lowerCamelCase_ : Optional[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ : Any = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=A )
lowerCamelCase_ : List[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ : Union[str, Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A )
self.assertIsInstance(processor_fast.tokenizer , A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A )
self.assertIsInstance(processor_fast.image_processor , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ : List[str] = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
lowerCamelCase_ : Dict = self.get_image_processor(do_normalize=A )
lowerCamelCase_ : Tuple = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = self.get_image_processor()
lowerCamelCase_ : Optional[int] = self.get_tokenizer()
lowerCamelCase_ : List[str] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Any = self.prepare_image_inputs()
lowerCamelCase_ : List[Any] = image_processor(A , return_tensors='''np''' )
lowerCamelCase_ : Optional[int] = processor(images=A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.get_image_processor()
lowerCamelCase_ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase_ : str = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : int = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : int = processor(text=A )
lowerCamelCase_ : Dict = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = self.get_image_processor()
lowerCamelCase_ : int = self.get_tokenizer()
lowerCamelCase_ : Union[str, Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Any = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : List[Any] = self.prepare_image_inputs()
lowerCamelCase_ : Optional[int] = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = self.get_image_processor()
lowerCamelCase_ : int = self.get_tokenizer()
lowerCamelCase_ : Any = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ : Union[str, Any] = processor.batch_decode(A )
lowerCamelCase_ : Any = tokenizer.batch_decode(A )
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.get_image_processor()
lowerCamelCase_ : Optional[int] = self.get_tokenizer()
lowerCamelCase_ : Optional[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : int = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : str = self.prepare_image_inputs()
lowerCamelCase_ : int = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 318 | 0 |
"""simple docstring"""
def _lowerCAmelCase ( UpperCAmelCase__ : int ) ->Tuple:
A__ : Union[str, Any] = [[0 for _ in range(UpperCAmelCase_ )] for _ in range(m + 1 )]
for i in range(m + 1 ):
A__ : List[Any] = 1
for n in range(m + 1 ):
for k in range(1, UpperCAmelCase_ ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
A_ = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
A_ = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 353 |
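Sanity checks for the partition counter above; p(5) can be verified by hand, and the second value is the published answer to Project Euler 76:

assert partition(5) == 6  # 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1
assert partition(100) == 190569291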
"""simple docstring"""
import os
from distutils.util import strtobool
def _lowerCAmelCase ( UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Optional[Any] ) ->List[str]:
for e in env_keys:
A__ : List[Any] = int(os.environ.get(UpperCAmelCase__, -1 ) )
if val >= 0:
return val
return default
def _lowerCAmelCase ( UpperCAmelCase__ : Tuple, UpperCAmelCase__ : str=False ) ->List[str]:
A__ : List[Any] = os.environ.get(UpperCAmelCase__, str(UpperCAmelCase__ ) )
return strtobool(UpperCAmelCase__ ) == 1 # As its name indicates `strtobool` actually returns an int...
def _lowerCAmelCase ( UpperCAmelCase__ : Tuple, UpperCAmelCase__ : List[Any]="no" ) ->int:
A__ : str = os.environ.get(UpperCAmelCase__, str(UpperCAmelCase__ ) )
return value
| 296 | 0 |
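A short usage sketch for the environment helpers above; the variable names are made up:

import os

os.environ["MY_DEBUG_FLAG"] = "yes"  # strtobool accepts y/yes/true/1 and n/no/false/0
os.environ["MY_WORLD_SIZE"] = "4"

assert parse_flag_from_env("MY_DEBUG_FLAG") is True
assert get_int_from_env(["MY_MISSING_VAR", "MY_WORLD_SIZE"], default=1) == 4
assert parse_choice_from_env("MY_MISSING_CHOICE") == "no"  # falls back to the default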
from collections import defaultdict
from math import gcd


def solution(limit: int = 1_500_000) -> int:
    """Count the perimeters up to `limit` that belong to exactly one
    integer-sided right triangle (Project Euler problem 75)."""
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"{solution() = }")
| 21 |
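A quick check of the Euclid parametrization the sieve above relies on: coprime m > n of opposite parity generate a primitive Pythagorean triple whose perimeter is 2m(m + n):

m, n = 2, 1
a, b, c = m * m - n * n, 2 * m * n, m * m + n * n  # the (3, 4, 5) triangle
assert a * a + b * b == c * c
assert a + b + c == 2 * m * (m + n)  # the perimeter stride used in the sieve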
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase, '''tf_padding''' ) )
self.parent.assertTrue(hasattr(lowerCamelCase, '''depth_multiplier''' ) )
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : List[str], lowerCamelCase : List[str], lowerCamelCase : Optional[Any]=13, lowerCamelCase : List[str]=3, lowerCamelCase : List[str]=32, lowerCamelCase : Union[str, Any]=0.25, lowerCamelCase : int=8, lowerCamelCase : Dict=True, lowerCamelCase : Optional[int]=1_024, lowerCamelCase : List[str]=32, lowerCamelCase : Optional[int]="relu6", lowerCamelCase : Optional[int]=0.1, lowerCamelCase : Optional[Any]=0.02, lowerCamelCase : List[Any]=True, lowerCamelCase : Any=True, lowerCamelCase : Dict=10, lowerCamelCase : Optional[int]=None, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = depth_multiplier
lowercase__ = min_depth
lowercase__ = tf_padding
lowercase__ = int(last_hidden_size * depth_multiplier )
lowercase__ = output_stride
lowercase__ = hidden_act
lowercase__ = classifier_dropout_prob
lowercase__ = use_labels
lowercase__ = is_training
lowercase__ = num_labels
lowercase__ = initializer_range
lowercase__ = scope
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size], self.num_labels )
lowercase__ = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
lowercase__ = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowercase__ ( self : List[str] ):
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, min_depth=self.min_depth, tf_padding=self.tf_padding, hidden_act=self.hidden_act, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
def lowercase__ ( self : List[str], lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : Tuple, lowerCamelCase : Tuple ):
'''simple docstring'''
lowercase__ = MobileNetVaModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def lowercase__ ( self : str, lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : List[Any], lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = MobileNetVaForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase, labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( A__ ,A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
lowercase__ = (
{"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = MobileNetVaModelTester(self )
lowercase__ = MobileNetVaConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase )
def lowercase__ ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
def lowercase__ ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileNetV1 does not output attentions''' )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCamelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1], lowerCamelCase )
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase : str, lowerCamelCase : Optional[int], lowerCamelCase : List[str] ):
lowercase__ = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
lowercase__ = outputs.hidden_states
lowercase__ = 26
self.assertEqual(len(lowerCamelCase ), lowerCamelCase )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@slow
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = MobileNetVaModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def a ( ):
'''simple docstring'''
lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase__ ( self : str ):
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
)
@slow
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(lowerCamelCase )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
lowercase__ = model(**lowerCamelCase )
# verify the logits
lowercase__ = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape, lowerCamelCase )
lowercase__ = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCamelCase, atol=1E-4 ) )
| 207 | 0 |
"""simple docstring"""
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class _lowerCAmelCase ( unittest.TestCase , _UpperCamelCase ):
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = load_tool('text-to-speech' )
self.tool.setup()
def snake_case ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ :Optional[int] = self.tool('hey' )
lowerCAmelCase__ :str = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85] ) , ) )
def snake_case ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ :Dict = self.tool('hey' )
lowerCAmelCase__ :Dict = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85] ) , ) )
| 368 |
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1e-12 ) ->str:
"""simple docstring"""
lowerCAmelCase__ :Tuple = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_SCREAMING_SNAKE_CASE , axis=1 ) , a_min=_SCREAMING_SNAKE_CASE ) ).T
lowerCAmelCase__ :int = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_SCREAMING_SNAKE_CASE , axis=1 ) , a_min=_SCREAMING_SNAKE_CASE ) ).T
return jnp.matmul(_SCREAMING_SNAKE_CASE , norm_emb_a.T )
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
__magic_name__ :CLIPConfig
__magic_name__ :jnp.dtype = jnp.floataa
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = FlaxCLIPVisionModule(self.config.vision_config )
lowerCAmelCase__ :str = nn.Dense(self.config.projection_dim , use_bias=__UpperCAmelCase , dtype=self.dtype )
lowerCAmelCase__ :Optional[Any] = self.param('concept_embeds' , jax.nn.initializers.ones , (1_7, self.config.projection_dim) )
lowerCAmelCase__ :Optional[int] = self.param(
'special_care_embeds' , jax.nn.initializers.ones , (3, self.config.projection_dim) )
lowerCAmelCase__ :Any = self.param('concept_embeds_weights' , jax.nn.initializers.ones , (1_7,) )
lowerCAmelCase__ :List[Any] = self.param('special_care_embeds_weights' , jax.nn.initializers.ones , (3,) )
def __call__( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = self.vision_model(__UpperCAmelCase )[1]
lowerCAmelCase__ :Optional[int] = self.visual_projection(__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = jax_cosine_distance(__UpperCAmelCase , self.special_care_embeds )
lowerCAmelCase__ :Tuple = jax_cosine_distance(__UpperCAmelCase , self.concept_embeds )
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign image inputs
lowerCAmelCase__ :Dict = 0.0
lowerCAmelCase__ :List[str] = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
lowerCAmelCase__ :Optional[Any] = jnp.round(__UpperCAmelCase , 3 )
lowerCAmelCase__ :Tuple = jnp.any(special_scores > 0 , axis=1 , keepdims=__UpperCAmelCase )
# Use a lower threshold if an image has any special care concept
lowerCAmelCase__ :List[Any] = is_special_care * 0.01
lowerCAmelCase__ :Union[str, Any] = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
lowerCAmelCase__ :Any = jnp.round(__UpperCAmelCase , 3 )
lowerCAmelCase__ :Tuple = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :Tuple = CLIPConfig
__magic_name__ :Tuple = """clip_input"""
__magic_name__ :str = FlaxStableDiffusionSafetyCheckerModule
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = 0 , __UpperCAmelCase = jnp.floataa , __UpperCAmelCase = True , **__UpperCAmelCase , ):
'''simple docstring'''
if input_shape is None:
lowerCAmelCase__ :Dict = (1, 2_2_4, 2_2_4, 3)
lowerCAmelCase__ :Any = self.module_class(config=__UpperCAmelCase , dtype=__UpperCAmelCase , **__UpperCAmelCase )
super().__init__(__UpperCAmelCase , __UpperCAmelCase , input_shape=__UpperCAmelCase , seed=__UpperCAmelCase , dtype=__UpperCAmelCase , _do_init=_do_init )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
lowerCAmelCase__ :str = jax.random.normal(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = jax.random.split(__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = {'params': params_rng, 'dropout': dropout_rng}
lowerCAmelCase__ :Optional[int] = self.module.init(__UpperCAmelCase , __UpperCAmelCase )['params']
return random_params
def __call__( self , __UpperCAmelCase , __UpperCAmelCase = None , ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = jnp.transpose(__UpperCAmelCase , (0, 2, 3, 1) )
return self.module.apply(
{'params': params or self.params} , jnp.array(__UpperCAmelCase , dtype=jnp.floataa ) , rngs={} , )
| 254 | 0 |
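Despite its name, `jax_cosine_distance` above returns pairwise cosine similarities. A small shape check with illustrative inputs:

import jax.numpy as jnp

a = jnp.ones((2, 4))  # 2 image embeddings of dimension 4
b = jnp.ones((3, 4))  # 3 concept embeddings of dimension 4
print(jax_cosine_distance(a, b).shape)  # (2, 3): one similarity per (image, concept) pair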
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ : str = logging.get_logger(__name__)
def _lowerCamelCase ( lowercase : int , lowercase : Union[str, Any]=False ) -> Optional[Any]:
_a = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_a = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : Tuple , lowercase : List[str]=False ) -> List[Any]:
for i in range(config.num_hidden_layers ):
if base_model:
_a = ""
else:
_a = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_a = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
_a = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_a = in_proj_weight[
: config.hidden_size, :
]
_a = in_proj_bias[: config.hidden_size]
_a = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_a = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_a = in_proj_weight[
-config.hidden_size :, :
]
_a = in_proj_bias[-config.hidden_size :]
def _lowerCamelCase ( lowercase : str ) -> List[str]:
_a = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(lowercase , lowercase )
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : List[str] , lowercase : Union[str, Any] ) -> int:
_a = dct.pop(lowercase )
_a = val
def _lowerCamelCase ( ) -> Dict:
_a = "http://images.cocodataset.org/val2017/000000039769.jpg"
_a = Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
@torch.no_grad()
def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : List[str] , lowercase : List[Any]=False ) -> List[str]:
_a = BitConfig(
global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=lowercase , )
_a = ViTHybridConfig(backbone_config=lowercase , image_size=384 , num_labels=1000 )
_a = False
# load original model from timm
_a = timm.create_model(lowercase , pretrained=lowercase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_a = timm_model.state_dict()
if base_model:
remove_classification_head_(lowercase )
_a = create_rename_keys(lowercase , lowercase )
for src, dest in rename_keys:
rename_key(lowercase , lowercase , lowercase )
read_in_q_k_v(lowercase , lowercase , lowercase )
_a = "huggingface/label-files"
_a = "imagenet-1k-id2label.json"
_a = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) )
_a = {int(lowercase ): v for k, v in idalabel.items()}
_a = idalabel
_a = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
_a = ViTHybridModel(lowercase ).eval()
else:
_a = ViTHybridForImageClassification(lowercase ).eval()
model.load_state_dict(lowercase )
# create image processor
_a = create_transform(**resolve_data_config({} , model=lowercase ) )
_a = transform.transforms
_a = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
_a = ViTHybridImageProcessor(
do_resize=lowercase , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowercase , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=lowercase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
_a = prepare_img()
_a = transform(lowercase ).unsqueeze(0 )
_a = processor(lowercase , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(lowercase , lowercase )
# verify logits
with torch.no_grad():
_a = model(lowercase )
_a = outputs.logits
print("Predicted class:" , logits.argmax(-1 ).item() )
if base_model:
_a = timm_model.forward_features(lowercase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(lowercase , outputs.pooler_output , atol=1E-3 )
else:
_a = timm_model(lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowercase , outputs.logits , atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(lowercase ).mkdir(exist_ok=lowercase )
print(F'Saving model {vit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowercase )
print(F'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(lowercase )
if push_to_hub:
print(F'Pushing model and processor to the hub {vit_name}' )
model.push_to_hub(F'ybelkada/{vit_name}' )
processor.push_to_hub(F'ybelkada/{vit_name}' )
if __name__ == "__main__":
lowerCAmelCase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
lowerCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 63 |
"""simple docstring"""


def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of
    the first n natural numbers (Project Euler problem 6)."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
| 63 | 1 |
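The loop above can be cross-checked against the closed forms sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6; a minimal sketch:

def solution_closed_form(n: int = 100) -> int:
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares

assert solution_closed_form(10) == 2640 == solution(10)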
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    """Breadth-first search for an augmenting path from s to t; fills `parent`."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Ford-Fulkerson: return the saturated edges of a minimum cut.
    Note that `graph` is mutated into the residual network."""
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
| 300 |
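Usage note for the routine above: `mincut` turns the graph it is given into the residual network, so pass a copy if the original capacities are still needed. By max-flow/min-cut duality, the capacities of the returned cut edges sum to the maximum flow (23 for this graph):

import copy

residual = copy.deepcopy(test_graph)
cut_edges = mincut(residual, source=0, sink=5)
print(cut_edges, sum(test_graph[u][v] for u, v in cut_edges))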
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
| 300 | 1 |
import functools


def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Minimum number of single-character edits (insert, delete, replace)
    needed to turn word1 into word2, memoized top-down."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if the first word's index overflows - delete all remaining chars of the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if the second word's index overflows - delete all remaining chars of the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 240 |
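Classic test values for the memoized edit distance above:

assert min_distance_up_bottom("intention", "execution") == 5
assert min_distance_up_bottom("kitten", "sitting") == 3
assert min_distance_up_bottom("", "abc") == 3  # pure insertions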
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file using the given replacement pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace links to main-branch docs with links to the stable docs in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)

    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 142 | 0 |
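Typical invocations of the release script above, assuming it lives at `utils/release.py` as in the diffusers repo:

# python utils/release.py                  # pre-release: bump to the release version
# python utils/release.py --patch          # pre-release for a patch version
# python utils/release.py --post_release   # after the release: move to the next .dev0 version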
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 368 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def A__ ( ) -> Union[str, Any]:
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
UpperCamelCase_: Optional[int] = """__test_patch_submodule_mock__"""
with patch_submodule(_test_patching , """os.path.join""" , lowerCamelCase ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def A__ ( ) -> Union[str, Any]:
assert _test_patching.open is open
UpperCamelCase_: List[Any] = """__test_patch_submodule_builtin_mock__"""
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , """open""" , lowerCamelCase ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def A__ ( ) -> Optional[Any]:
# pandas.read_csv is not present in _test_patching
UpperCamelCase_: Optional[Any] = """__test_patch_submodule_missing_mock__"""
with patch_submodule(_test_patching , """pandas.read_csv""" , lowerCamelCase ):
pass
def A__ ( ) -> Any:
# builtin should always be mocked even if they're not in the globals
# in case they're loaded at one point
UpperCamelCase_: List[Any] = """__test_patch_submodule_missing_builtin_mock__"""
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , """len""" , lowerCamelCase ) is None
with patch_submodule(_test_patching , """len""" , lowerCamelCase ):
assert _test_patching.len is mock
assert _test_patching.len is len
def A__ ( ) -> Any:
UpperCamelCase_: Dict = """__test_patch_submodule_start_and_stop_mock__"""
UpperCamelCase_: List[str] = patch_submodule(_test_patching , """open""" , lowerCamelCase )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def A__ ( ) -> List[str]:
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
UpperCamelCase_: Optional[Any] = """__test_patch_submodule_successive_join__"""
UpperCamelCase_: Any = """__test_patch_submodule_successive_dirname__"""
UpperCamelCase_: Dict = """__test_patch_submodule_successive_rename__"""
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , """os.path.join""" , lowerCamelCase ):
with patch_submodule(_test_patching , """os.rename""" , lowerCamelCase ):
with patch_submodule(_test_patching , """os.path.dirname""" , lowerCamelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , """os.rename""" , lowerCamelCase ):
with patch_submodule(_test_patching , """os.path.join""" , lowerCamelCase ):
with patch_submodule(_test_patching , """os.path.dirname""" , lowerCamelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def A__ ( ) -> Union[str, Any]:
UpperCamelCase_: Dict = """__test_patch_submodule_doesnt_exist_mock__"""
with patch_submodule(_test_patching , """__module_that_doesn_exist__.__attribute_that_doesn_exist__""" , lowerCamelCase ):
pass
with patch_submodule(_test_patching , """os.__attribute_that_doesn_exist__""" , lowerCamelCase ):
pass
| 223 | 0 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
UpperCAmelCase_ : int = """bart"""
UpperCAmelCase_ : Dict = True
@st.cache(allow_output_mutation=__a )
def _A () -> Union[str, Any]:
"""simple docstring"""
if LOAD_DENSE_INDEX:
SCREAMING_SNAKE_CASE_ : List[str] = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
SCREAMING_SNAKE_CASE_ : Dict = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = qar_model.eval()
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = (None, None)
if MODEL_TYPE == "bart":
SCREAMING_SNAKE_CASE_ : str = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
SCREAMING_SNAKE_CASE_ : List[Any] = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
SCREAMING_SNAKE_CASE_ : List[str] = sas_model.eval()
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = make_qa_sas_model(
model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=__a )
def _A () -> Optional[int]:
"""simple docstring"""
if LOAD_DENSE_INDEX:
SCREAMING_SNAKE_CASE_ : int = faiss.StandardGpuResources()
SCREAMING_SNAKE_CASE_ : int = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 1_28) , )
SCREAMING_SNAKE_CASE_ : str = faiss.IndexFlatIP(1_28 )
SCREAMING_SNAKE_CASE_ : Dict = faiss.index_cpu_to_gpu(__a , 1 , __a )
wikiaab_gpu_index_flat.add(__a ) # TODO fix for larger GPU
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = (None, None)
SCREAMING_SNAKE_CASE_ : List[Any] = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=__a )
def _A () -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
SCREAMING_SNAKE_CASE_ : str = elia['''train_eli5''']
SCREAMING_SNAKE_CASE_ : Optional[int] = np.memmap(
'''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 1_28) )
SCREAMING_SNAKE_CASE_ : str = faiss.IndexFlatIP(1_28 )
eli5_train_q_index.add(__a )
return (elia_train, eli5_train_q_index)
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = load_indexes()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = load_models()
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = load_train_data()
def _A (__a , __a=10 ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = embed_questions_for_retrieval([question] , __a , __a )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = eli5_train_q_index.search(__a , __a )
SCREAMING_SNAKE_CASE_ : Optional[Any] = [elia_train[int(__a )] for i in I[0]]
return nn_examples
def _A (__a , __a="wiki40b" , __a="dense" , __a=10 ) -> str:
"""simple docstring"""
if source == "none":
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = query_qa_dense_index(
__a , __a , __a , __a , __a , __a )
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = query_es_index(
__a , __a , index_name='''english_wiki40b_snippets_100w''' , n_results=__a , )
SCREAMING_SNAKE_CASE_ : str = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
SCREAMING_SNAKE_CASE_ : Optional[int] = '''question: {} context: {}'''.format(__a , __a )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda __a : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __a : None),
} )
def _A (__a , __a , __a , __a=64 , __a=2_56 , __a=False , __a=2 , __a=0.95 , __a=0.8 ) -> Union[str, Any]:
"""simple docstring"""
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Tuple = qa_sas_generate(
__a , __a , __a , num_answers=1 , num_beams=__a , min_len=__a , max_len=__a , do_sample=__a , temp=__a , top_p=__a , top_k=__a , max_input_length=10_24 , device='''cuda:0''' , )[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
UpperCAmelCase_ : Any = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
UpperCAmelCase_ : Optional[int] = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
UpperCAmelCase_ : Any = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
UpperCAmelCase_ : Optional[Any] = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
UpperCAmelCase_ : Optional[Any] = st.sidebar.checkbox("""Demo options""")
if demo_options:
UpperCAmelCase_ : Optional[Any] = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
UpperCAmelCase_ : Tuple = action_list.index(action_st)
UpperCAmelCase_ : Optional[Any] = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
UpperCAmelCase_ : Union[str, Any] = show_type == """Show full text of passages"""
else:
UpperCAmelCase_ : int = 3
UpperCAmelCase_ : Optional[int] = True
UpperCAmelCase_ : Optional[Any] = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
UpperCAmelCase_ : Optional[Any] = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
The answer is then generated by a sequence-to-sequence model which takes the question and the retrieved documents as input.
"""
st.sidebar.markdown(retriever_info)
UpperCAmelCase_ : int = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
UpperCAmelCase_ : Dict = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
UpperCAmelCase_ : List[Any] = """wiki40b"""
UpperCAmelCase_ : str = """dense"""
UpperCAmelCase_ : Any = """beam"""
UpperCAmelCase_ : Optional[Any] = 2
UpperCAmelCase_ : Optional[int] = 64
UpperCAmelCase_ : Any = 256
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = st.sidebar.checkbox("""Generation options""")
if generate_options:
UpperCAmelCase_ : int = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can decode with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
UpperCAmelCase_ : Optional[int] = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
UpperCAmelCase_ : Optional[Any] = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
UpperCAmelCase_ : Optional[int] = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
UpperCAmelCase_ : Union[str, Any] = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
UpperCAmelCase_ : str = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None
)
UpperCAmelCase_ : str = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None
)
UpperCAmelCase_ : Union[str, Any] = None
# start main text
UpperCAmelCase_ : Optional[int] = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
UpperCAmelCase_ : Union[str, Any] = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
UpperCAmelCase_ : Optional[int] = st.text_input("""Enter your question here:""", """""")
else:
UpperCAmelCase_ : int = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
UpperCAmelCase_ , UpperCAmelCase_ : str = make_support(question, source=wiki_source, method="""dense""", n_results=10)
UpperCAmelCase_ , UpperCAmelCase_ : int = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
UpperCAmelCase_ : Optional[int] = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
UpperCAmelCase_ : List[Any] = support_list[:10]
UpperCAmelCase_ : Tuple = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
UpperCAmelCase_ , UpperCAmelCase_ : Any = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
UpperCAmelCase_ , UpperCAmelCase_ : str = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
UpperCAmelCase_ : Optional[int] = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
UpperCAmelCase_ : Optional[int] = res[1].strip()
if sec_titles == "":
UpperCAmelCase_ : Dict = """[{}]({})""".format(res[0], wiki_url)
else:
UpperCAmelCase_ : str = sec_titles.split(""" & """)
UpperCAmelCase_ : Dict = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
UpperCAmelCase_ : int = find_nearest_training(question)
UpperCAmelCase_ : List[Any] = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
UpperCAmelCase_ : str = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
UpperCAmelCase_ : Dict = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 91 |
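For reference, the dense-retrieval step performed by the demo above can be reproduced in isolation with a few lines of FAISS. This is a minimal sketch: the random vectors stand in for the RetriBERT question and passage embeddings, and the dimensionality of 128 matches the memmapped representation files used above.
import faiss
import numpy as np
# Fake passage embeddings standing in for the precomputed wiki40b representations.
dim = 128
passage_reps = np.random.rand(1000, dim).astype("float32")
index = faiss.IndexFlatIP(dim)  # exact maximum-inner-product search
index.add(passage_reps)
# Fake question embedding; in the demo this comes from embed_questions_for_retrieval.
query_rep = np.random.rand(1, dim).astype("float32")
scores, ids = index.search(query_rep, 10)  # top-10 passage ids and their scores
print(ids[0], scores[0])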
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch """
            """helper utility that will spawn up """
            """multiple distributed processes"""
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""" , type=int , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
    # positional
    parser.add_argument(
        """training_script""" , type=str , help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ) , )
    # rest from the training program
    parser.add_argument("""training_script_args""" , nargs=REMAINDER )
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv so the training script sees its own arguments plus --tpu_num_cores
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 291 | 0 |
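As a usage note for the launcher above: it is invoked from the command line with the training script and its arguments appended, and it expects that script to expose an `_mp_fn` entry point for `xmp.spawn`. The invocation and script names below are illustrative, not taken from the source.
# Hypothetical invocation (shell):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased
# Everything after the script path is forwarded untouched; the launcher only
# prepends --tpu_num_cores. The target script needs roughly this shape:
def _mp_fn(index):
    # xmp.spawn calls this once per TPU core, passing the process index.
    print(f"process {index} starting")
if __name__ == "__main__":
    _mp_fn(0)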
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def get_imdb_top_aaa_movies(url: str = "" ):
    """simple docstring"""
    url = url or """https://www.imdb.com/chart/top/?ref_=nv_mv_250"""
    soup = BeautifulSoup(requests.get(url ).text , """html.parser""" )
    titles = soup.find_all("""td""" , attrs="""titleColumn""" )
    ratings = soup.find_all("""td""" , class_="""ratingColumn imdbRating""" )
    return {
        title.a.text: float(rating.strong.text )
        for title, rating in zip(titles , ratings )
    }
def write_movies(filename: str = "IMDb_Top_250_Movies.csv" ):
    """simple docstring"""
    movies = get_imdb_top_aaa_movies()
    with open(filename , """w""" , newline="""""" ) as out_file:
        writer = csv.writer(out_file )
        writer.writerow(["""Movie title""", """IMDb rating"""] )
        for title, rating in movies.items():
            writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 87 |
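The scraping pattern above can be exercised without network access by feeding BeautifulSoup a static snippet. This sketch assumes IMDb's old table markup (the live page has since changed):
from bs4 import BeautifulSoup
html = """
<table>
  <tr>
    <td class="titleColumn"><a href="/title/tt0111161/">The Shawshank Redemption</a></td>
    <td class="ratingColumn imdbRating"><strong>9.2</strong></td>
  </tr>
</table>
"""
soup = BeautifulSoup(html, "html.parser")
titles = soup.find_all("td", attrs="titleColumn")  # a bare string filters on class
ratings = soup.find_all("td", class_="ratingColumn imdbRating")
print({t.a.text: float(r.strong.text) for t, r in zip(titles, ratings)})
# {'The Shawshank Redemption': 9.2}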
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def _UpperCamelCase (a__ :Dict[str, torch.Tensor] ):
"""simple docstring"""
UpperCamelCase__ = []
UpperCamelCase__ = []
UpperCamelCase__ = []
for rt in rc.restypes:
UpperCamelCase__ = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
UpperCamelCase__ = {name: i for i, name in enumerate(a__ )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
UpperCamelCase__ = torch.tensor(
a__ , dtype=torch.intaa , device=protein["""aatype"""].device , )
UpperCamelCase__ = torch.tensor(
a__ , dtype=torch.intaa , device=protein["""aatype"""].device , )
UpperCamelCase__ = torch.tensor(
a__ , dtype=torch.floataa , device=protein["""aatype"""].device , )
UpperCamelCase__ = protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
UpperCamelCase__ = restype_atomaa_to_atomaa[protein_aatype]
UpperCamelCase__ = restype_atomaa_mask[protein_aatype]
UpperCamelCase__ = residx_atomaa_mask
UpperCamelCase__ = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
UpperCamelCase__ = restype_atomaa_to_atomaa[protein_aatype]
UpperCamelCase__ = residx_atomaa_to_atomaa.long()
# create the corresponding mask
UpperCamelCase__ = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["""aatype"""].device )
for restype, restype_letter in enumerate(rc.restypes ):
UpperCamelCase__ = rc.restype_atoa[restype_letter]
UpperCamelCase__ = rc.residue_atoms[restype_name]
for atom_name in atom_names:
UpperCamelCase__ = rc.atom_order[atom_name]
UpperCamelCase__ = 1
UpperCamelCase__ = restype_atomaa_mask[protein_aatype]
UpperCamelCase__ = residx_atomaa_mask
return protein
def _UpperCamelCase (a__ :Dict[str, torch.Tensor] ):
"""simple docstring"""
UpperCamelCase__ = tree_map(lambda a__ : torch.tensor(a__ , device=batch["""aatype"""].device ) , a__ , np.ndarray )
UpperCamelCase__ = tensor_tree_map(lambda a__ : np.array(a__ ) , make_atomaa_masks(a__ ) )
return out
| 87 | 1 |
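The core mechanic of the atom mapping above is a table lookup through advanced indexing: a (num_restypes, num_atoms) table indexed with a per-residue type vector broadcasts to a (num_res, num_atoms) tensor. A toy version:
import torch
lookup = torch.tensor([[10, 11], [20, 21], [30, 31]])  # 3 residue types x 2 atoms
aatype = torch.tensor([2, 0, 0, 1])                    # residue type of each of 4 residues
print(lookup[aatype])
# tensor([[30, 31],
#         [10, 11],
#         [10, 11],
#         [20, 21]])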
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
UpperCamelCase_ = [
{"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
{"""dataset""": """snli""", """config_name""": """plain_text"""},
{"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
{"""dataset""": """wiki40b""", """config_name""": """en"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
{"""dataset""": """natural_questions""", """config_name""": """default"""},
]
def _UpperCAmelCase ( _lowerCamelCase : Tuple=True ) -> int:
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_a ) )
class a_ (_a ):
__lowerCAmelCase : Union[str, Any] = None
__lowerCAmelCase : str = None
def __UpperCamelCase ( self , snake_case_ , snake_case_ ):
with TemporaryDirectory() as tmp_dir:
_lowerCAmelCase : str = dataset_module_factory(snake_case_ , cache_dir=snake_case_ )
_lowerCAmelCase : Optional[int] = import_main_class(dataset_module.module_path , dataset=snake_case_ )
_lowerCAmelCase : DatasetBuilder = builder_cls(
cache_dir=snake_case_ , config_name=snake_case_ , hash=dataset_module.hash , )
_lowerCAmelCase : Optional[int] = """/""".join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=snake_case_ ).replace(os.sep , """/""" ),
config.DATASET_INFO_FILENAME,
] )
_lowerCAmelCase : str = cached_path(snake_case_ , cache_dir=snake_case_ )
self.assertTrue(os.path.exists(snake_case_ ) )
@pytest.mark.integration
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] ) -> Optional[int]:
_lowerCAmelCase : List[str] = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple"""
_lowerCAmelCase : Tuple = dataset_module_factory("""wikipedia""" , cache_dir=_lowerCamelCase )
_lowerCAmelCase : Dict = import_main_class(dataset_module.module_path )
_lowerCAmelCase : DatasetBuilder = builder_cls(
cache_dir=_lowerCamelCase , config_name="""20220301.frr""" , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
_lowerCAmelCase : Any = None
builder_instance.download_and_prepare()
_lowerCAmelCase : str = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def _UpperCAmelCase ( _lowerCamelCase : Dict ) -> str:
_lowerCAmelCase : Optional[int] = dataset_module_factory("""wikipedia""" , cache_dir=_lowerCamelCase )
_lowerCAmelCase : List[str] = import_main_class(dataset_module.module_path , dataset=_lowerCamelCase )
_lowerCAmelCase : DatasetBuilder = builder_cls(
cache_dir=_lowerCamelCase , config_name="""20220301.frr""" , hash=dataset_module.hash , )
_lowerCAmelCase : Dict = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(_lowerCamelCase , _lowerCamelCase )
assert "train" in ds
assert isinstance(ds["""train"""] , _lowerCamelCase )
assert next(iter(ds["""train"""] ) )
| 309 |
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
UpperCamelCase_ = logging.get_logger(__name__)
class a_ :
def __init__( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[str] = question_encoder
_lowerCAmelCase : Optional[Any] = generator
_lowerCAmelCase : Optional[Any] = self.question_encoder
def __UpperCamelCase ( self , snake_case_ ):
if os.path.isfile(snake_case_ ):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
_lowerCAmelCase : Any = os.path.join(snake_case_ , """question_encoder_tokenizer""" )
_lowerCAmelCase : Tuple = os.path.join(snake_case_ , """generator_tokenizer""" )
self.question_encoder.save_pretrained(snake_case_ )
self.generator.save_pretrained(snake_case_ )
@classmethod
def __UpperCamelCase ( cls , snake_case_ , **snake_case_ ):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
_lowerCAmelCase : Dict = kwargs.pop("""config""" , snake_case_ )
if config is None:
_lowerCAmelCase : List[Any] = RagConfig.from_pretrained(snake_case_ )
_lowerCAmelCase : int = AutoTokenizer.from_pretrained(
snake_case_ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
_lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
snake_case_ , config=config.generator , subfolder="""generator_tokenizer""" )
return cls(question_encoder=snake_case_ , generator=snake_case_ )
def __call__( self , *snake_case_ , **snake_case_ ):
return self.current_tokenizer(*snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , *snake_case_ , **snake_case_ ):
return self.generator.batch_decode(*snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , *snake_case_ , **snake_case_ ):
return self.generator.decode(*snake_case_ , **snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.question_encoder
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[Any] = self.generator
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = "longest" , snake_case_ = None , snake_case_ = True , **snake_case_ , ):
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" , snake_case_ , )
if max_length is None:
_lowerCAmelCase : Any = self.current_tokenizer.model_max_length
_lowerCAmelCase : List[Any] = self(
snake_case_ , add_special_tokens=snake_case_ , return_tensors=snake_case_ , max_length=snake_case_ , padding=snake_case_ , truncation=snake_case_ , **snake_case_ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
_lowerCAmelCase : List[str] = self.current_tokenizer.model_max_length
_lowerCAmelCase : List[str] = self(
text_target=snake_case_ , add_special_tokens=snake_case_ , return_tensors=snake_case_ , padding=snake_case_ , max_length=snake_case_ , truncation=snake_case_ , **snake_case_ , )
_lowerCAmelCase : Dict = labels["""input_ids"""]
return model_inputs
| 309 | 1 |
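The tokenizer class above is a facade that routes every call to whichever sub-tokenizer is currently active. The delegation pattern in isolation, with stand-in callables rather than real tokenizers:
class Facade:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current = question_encoder  # default delegate
    def __call__(self, *args, **kwargs):
        return self.current(*args, **kwargs)
facade = Facade(question_encoder=str.upper, generator=str.lower)
print(facade("Hello"))          # 'HELLO', routed to the question encoder
facade.current = facade.generator
print(facade("Hello"))          # 'hello', routed to the generator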
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __A ( _lowercase ):
'''simple docstring'''
_A = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
_A = True if '''large''' in model_name or '''huge''' in model_name else False
_A = True if '''large''' in model_name or '''huge''' in model_name else False
_A = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
_A = [3, 3, 3, 3]
_A = [5, 5, 5, 5]
elif "fl4" in model_name:
_A = [4, 4, 4, 4]
_A = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
_A = [3, 3, 3, 3]
if "lrf" in model_name:
_A = [3, 3, 3, 3]
else:
_A = [2, 2, 2, 2]
if "tiny" in model_name:
_A = 96
elif "small" in model_name:
_A = 96
elif "base" in model_name:
_A = 1_28
elif "large" in model_name:
_A = 1_92
elif "xlarge" in model_name:
_A = 2_56
elif "huge" in model_name:
_A = 3_52
# set label information
_A = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
_A = '''imagenet-22k-id2label.json'''
else:
_A = '''imagenet-1k-id2label.json'''
_A = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) )
_A = {int(_lowercase ): v for k, v in idalabel.items()}
_A = {v: k for k, v in idalabel.items()}
_A = FocalNetConfig(
embed_dim=_lowercase , depths=_lowercase , focal_levels=_lowercase , focal_windows=_lowercase , use_conv_embed=_lowercase , idalabel=_lowercase , labelaid=_lowercase , use_post_layernorm=_lowercase , use_layerscale=_lowercase , )
return config
def __A ( _lowercase ):
'''simple docstring'''
if "patch_embed.proj" in name:
_A = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
_A = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
_A = '''encoder.''' + name
if "encoder.layers" in name:
_A = name.replace('''encoder.layers''' , '''encoder.stages''' )
if "downsample.proj" in name:
_A = name.replace('''downsample.proj''' , '''downsample.projection''' )
if "blocks" in name:
_A = name.replace('''blocks''' , '''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
_A = name.replace('''modulation.f''' , '''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
_A = name.replace('''modulation.h''' , '''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
_A = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
if name == "norm.weight":
_A = '''layernorm.weight'''
if name == "norm.bias":
_A = '''layernorm.bias'''
if "head" in name:
_A = name.replace('''head''' , '''classifier''' )
else:
_A = '''focalnet.''' + name
return name
def __A ( _lowercase , _lowercase , _lowercase=False ):
'''simple docstring'''
_A = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
_A = model_name_to_url[model_name]
print('''Checkpoint URL: ''' , _lowercase )
_A = torch.hub.load_state_dict_from_url(_lowercase , map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
_A = state_dict.pop(_lowercase )
_A = val
_A = get_focalnet_config(_lowercase )
_A = FocalNetForImageClassification(_lowercase )
model.eval()
# load state dict
model.load_state_dict(_lowercase )
# verify conversion
_A = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_A = BitImageProcessor(
do_resize=_lowercase , size={'''shortest_edge''': 2_56} , resample=PILImageResampling.BILINEAR , do_center_crop=_lowercase , crop_size=2_24 , do_normalize=_lowercase , image_mean=_lowercase , image_std=_lowercase , )
_A = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
_A = processor(images=_lowercase , return_tensors='''pt''' )
_A = transforms.Compose(
[
transforms.Resize(2_56 ),
transforms.CenterCrop(2_24 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
] )
_A = image_transforms(_lowercase ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , _lowercase , atol=1e-4 )
_A = model(**_lowercase )
_A = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
_A = torch.tensor([0.21_66, -0.43_68, 0.21_91] )
elif model_name == "focalnet-tiny-lrf":
_A = torch.tensor([1.16_69, 0.01_25, -0.16_95] )
elif model_name == "focalnet-small":
_A = torch.tensor([0.49_17, -0.04_30, 0.13_41] )
elif model_name == "focalnet-small-lrf":
_A = torch.tensor([-0.25_88, -0.53_42, -0.23_31] )
elif model_name == "focalnet-base":
_A = torch.tensor([-0.16_55, -0.40_90, -0.17_30] )
elif model_name == "focalnet-base-lrf":
_A = torch.tensor([0.53_06, -0.04_83, -0.39_28] )
assert torch.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
processor.save_pretrained(_lowercase )
if push_to_hub:
print(f"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(f"""{model_name}""" )
processor.push_to_hub(f"""{model_name}""" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
__A = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 75 |
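Stripped of the model specifics, the conversion above is a key-renaming pass over a state dict: pop each key, rewrite the name, reinsert the value. The mechanic on a toy dict:
state_dict = {"patch_embed.proj.weight": 1, "norm.bias": 2}
def rename_key(name: str) -> str:
    name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if name == "norm.bias":
        name = "layernorm.bias"
    return name
for key in list(state_dict):  # iterate over a snapshot so the dict can be mutated
    state_dict[rename_key(key)] = state_dict.pop(key)
print(state_dict)
# {'embeddings.patch_embeddings.projection.weight': 1, 'layernorm.bias': 2}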
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def __A ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
_A = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
_A = (
('''layer.''', '''layer_'''),
('''word_embeddings.weight''', '''word_embeddings'''),
('''position_embeddings.weight''', '''position_embeddings'''),
('''token_type_embeddings.weight''', '''token_type_embeddings'''),
('''.''', '''/'''),
('''LayerNorm/weight''', '''LayerNorm/gamma'''),
('''LayerNorm/bias''', '''LayerNorm/beta'''),
('''weight''', '''kernel'''),
)
if not os.path.isdir(_lowercase ):
os.makedirs(_lowercase )
_A = model.state_dict()
def to_tf_var_name(_lowercase ):
for patt, repl in iter(_lowercase ):
_A = name.replace(_lowercase , _lowercase )
return f"""bert/{name}"""
def create_tf_var(_lowercase , _lowercase , _lowercase ):
_A = tf.dtypes.as_dtype(tensor.dtype )
_A = tf.get_variable(dtype=_lowercase , shape=tensor.shape , name=_lowercase , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(_lowercase )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
_A = to_tf_var_name(_lowercase )
_A = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
_A = torch_tensor.T
_A = create_tf_var(tensor=_lowercase , name=_lowercase , session=_lowercase )
tf.keras.backend.set_value(_lowercase , _lowercase )
_A = session.run(_lowercase )
print(f"""Successfully created {tf_name}: {np.allclose(_lowercase , _lowercase )}""" )
_A = tf.train.Saver(tf.trainable_variables() )
saver.save(_lowercase , os.path.join(_lowercase , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) )
def __A ( _lowercase=None ):
'''simple docstring'''
_A = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=_lowercase , required=_lowercase , help='''model name e.g. bert-base-uncased''' )
parser.add_argument(
'''--cache_dir''' , type=_lowercase , default=_lowercase , required=_lowercase , help='''Directory containing pytorch model''' )
parser.add_argument('''--pytorch_model_path''' , type=_lowercase , required=_lowercase , help='''/path/to/<pytorch-model-name>.bin''' )
parser.add_argument('''--tf_cache_dir''' , type=_lowercase , required=_lowercase , help='''Directory in which to save tensorflow model''' )
_A = parser.parse_args(_lowercase )
_A = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=_lowercase , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 75 | 1 |
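The transposition list above exists because torch.nn.Linear stores its weight as (out_features, in_features), while a TensorFlow dense kernel is laid out (in_features, out_features). A quick shape check:
import torch
linear = torch.nn.Linear(in_features=4, out_features=3)
print(linear.weight.shape)                  # torch.Size([3, 4])
tf_kernel = linear.weight.detach().numpy().T
print(tf_kernel.shape)                      # (4, 3), the layout TF expects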
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowercase__ ( unittest.TestCase ):
@property
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
_UpperCamelCase : Any = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
return model
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = self.dummy_uncond_unet
_UpperCamelCase : List[Any] = ScoreSdeVeScheduler()
_UpperCamelCase : Optional[int] = ScoreSdeVePipeline(unet=lowerCamelCase__ ,scheduler=lowerCamelCase__ )
sde_ve.to(lowerCamelCase__ )
sde_ve.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCamelCase : Tuple = torch.manual_seed(0 )
_UpperCamelCase : str = sde_ve(num_inference_steps=2 ,output_type='numpy' ,generator=lowerCamelCase__ ).images
_UpperCamelCase : Dict = torch.manual_seed(0 )
_UpperCamelCase : Union[str, Any] = sde_ve(num_inference_steps=2 ,output_type='numpy' ,generator=lowerCamelCase__ ,return_dict=lowerCamelCase__ )[
0
]
_UpperCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
_UpperCamelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCamelCase : List[str] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class lowercase__ ( unittest.TestCase ):
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_UpperCamelCase : List[str] = 'google/ncsnpp-church-256'
_UpperCamelCase : Union[str, Any] = UNetaDModel.from_pretrained(lowerCamelCase__ )
_UpperCamelCase : List[str] = ScoreSdeVeScheduler.from_pretrained(lowerCamelCase__ )
_UpperCamelCase : int = ScoreSdeVePipeline(unet=lowerCamelCase__ ,scheduler=lowerCamelCase__ )
sde_ve.to(lowerCamelCase__ )
sde_ve.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCamelCase : Union[str, Any] = torch.manual_seed(0 )
_UpperCamelCase : int = sde_ve(num_inference_steps=10 ,output_type='numpy' ,generator=lowerCamelCase__ ).images
_UpperCamelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_UpperCamelCase : int = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 83 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "roformer"
def __init__( self , _a=5_0_0_0_0 , _a=None , _a=7_6_8 , _a=1_2 , _a=1_2 , _a=3_0_7_2 , _a="gelu" , _a=0.1 , _a=0.1 , _a=1_5_3_6 , _a=2 , _a=0.02 , _a=1e-1_2 , _a=0 , _a=False , _a=True , **_a , ) -> List[str]:
super().__init__(pad_token_id=_a , **_a )
_a : Tuple = vocab_size
_a : List[Any] = hidden_size if embedding_size is None else embedding_size
_a : Any = hidden_size
_a : Any = num_hidden_layers
_a : List[Any] = num_attention_heads
_a : str = hidden_act
_a : Any = intermediate_size
_a : Dict = hidden_dropout_prob
_a : Optional[Any] = attention_probs_dropout_prob
_a : str = max_position_embeddings
_a : Dict = type_vocab_size
_a : List[Any] = initializer_range
_a : Dict = layer_norm_eps
_a : Dict = rotary_value
_a : Dict = use_cache
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
@property
def __lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_a : Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_a : List[Any] = {0: '''batch''', 1: '''sequence'''}
_a : Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 235 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : Any = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowercase ( a ):
lowercase__ : Any = """yolos"""
def __init__( self : Tuple , _UpperCamelCase : str=768 , _UpperCamelCase : Any=12 , _UpperCamelCase : int=12 , _UpperCamelCase : List[Any]=3_072 , _UpperCamelCase : Dict="gelu" , _UpperCamelCase : Any=0.0 , _UpperCamelCase : List[Any]=0.0 , _UpperCamelCase : Optional[Any]=0.0_2 , _UpperCamelCase : Union[str, Any]=1e-12 , _UpperCamelCase : Tuple=[512, 864] , _UpperCamelCase : Optional[int]=16 , _UpperCamelCase : str=3 , _UpperCamelCase : int=True , _UpperCamelCase : Union[str, Any]=100 , _UpperCamelCase : Optional[Any]=True , _UpperCamelCase : List[Any]=False , _UpperCamelCase : List[Any]=1 , _UpperCamelCase : Union[str, Any]=5 , _UpperCamelCase : Union[str, Any]=2 , _UpperCamelCase : Optional[Any]=5 , _UpperCamelCase : Dict=2 , _UpperCamelCase : Tuple=0.1 , **_UpperCamelCase : int , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = qkv_bias
SCREAMING_SNAKE_CASE = num_detection_tokens
SCREAMING_SNAKE_CASE = use_mid_position_embeddings
SCREAMING_SNAKE_CASE = auxiliary_loss
# Hungarian matcher
SCREAMING_SNAKE_CASE = class_cost
SCREAMING_SNAKE_CASE = bbox_cost
SCREAMING_SNAKE_CASE = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE = bbox_loss_coefficient
SCREAMING_SNAKE_CASE = giou_loss_coefficient
SCREAMING_SNAKE_CASE = eos_coefficient
class lowercase ( a ):
lowercase__ : Union[str, Any] = version.parse("""1.11""" )
@property
def __snake_case( self : int ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def __snake_case( self : str ) -> float:
'''simple docstring'''
return 1e-4
@property
def __snake_case( self : Optional[int] ) -> int:
'''simple docstring'''
return 12
| 352 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class lowercase ( a ):
lowercase__ : Optional[Any] = (
"""This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."""
"""It takes two arguments named `image` which should be the original image, and `label` which should be a text """
"""describing the elements what should be identified in the segmentation mask. The tool returns the mask."""
)
lowercase__ : Optional[int] = """CIDAS/clipseg-rd64-refined"""
lowercase__ : Tuple = """image_segmenter"""
lowercase__ : Optional[Any] = CLIPSegForImageSegmentation
lowercase__ : int = ["""image""", """text"""]
lowercase__ : List[str] = ["""image"""]
def __init__( self : str , *_UpperCamelCase : str , **_UpperCamelCase : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["vision"] )
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : int , _UpperCamelCase : "Image" , _UpperCamelCase : str ) -> Optional[int]:
'''simple docstring'''
return self.pre_processor(text=[label] , images=[image] , padding=_UpperCamelCase , return_tensors="pt" )
def __snake_case( self : Union[str, Any] , _UpperCamelCase : str ) -> Union[str, Any]:
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE = self.model(**_UpperCamelCase ).logits
return logits
def __snake_case( self : Any , _UpperCamelCase : Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = outputs.cpu().detach().numpy()
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 1
return Image.fromarray((array * 255).astype(np.uinta ) )
| 206 | 0 |
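The tool above is a thin wrapper over CLIPSeg; the same three steps (encode, forward, binarize) can be run by hand. This is a sketch, running it downloads the checkpoint, and the 0.5 sigmoid threshold is an illustrative substitute for the hard 0/1 rewrite in the decode step:
import torch
from PIL import Image
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.new("RGB", (352, 352))  # stand-in for a real photo
inputs = processor(text=["a cat"], images=[image], padding=True, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
mask = (logits.sigmoid() > 0.5)  # boolean segmentation mask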
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=lowerCamelCase__ )
class __magic_name__ ( lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
lowerCamelCase__ : ClassVar[Features] = Features({'audio': Audio()} )
lowerCamelCase__ : ClassVar[Features] = Features({'labels': ClassLabel} )
lowerCamelCase__ : str = "audio"
lowerCamelCase__ : str = "labels"
def _UpperCAmelCase ( self, lowercase_ ) -> int:
"""simple docstring"""
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column], lowercase_ ):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
a__ =copy.deepcopy(self )
a__ =self.label_schema.copy()
a__ =features[self.label_column]
a__ =label_schema
return task_template
@property
def _UpperCAmelCase ( self ) -> Dict[str, str]:
"""simple docstring"""
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 188 |
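What the align method above does, concretely: verify that the label column exists and is a ClassLabel, then copy the template with the dataset's actual label feature substituted into label_schema. The substitution step on real datasets feature types:
from datasets import Audio, ClassLabel, Features
features = Features({"audio": Audio(), "labels": ClassLabel(names=["dog", "cat"])})
label_schema = {"labels": ClassLabel}        # placeholder, as in the template
label_schema["labels"] = features["labels"]  # swap in the concrete feature
print(label_schema["labels"].names)          # ['dog', 'cat']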
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = IFInpaintingSuperResolutionPipeline
lowerCamelCase__ : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
lowerCamelCase__ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
lowerCamelCase__ : str = PipelineTesterMixin.required_optional_params - {'latents'}
def _UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
return self._get_superresolution_dummy_components()
def _UpperCAmelCase ( self, lowercase_, lowercase_=0 ) -> Tuple:
"""simple docstring"""
if str(lowercase_ ).startswith('''mps''' ):
a__ =torch.manual_seed(lowercase_ )
else:
a__ =torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
a__ =floats_tensor((1, 3, 16, 16), rng=random.Random(lowercase_ ) ).to(lowercase_ )
a__ =floats_tensor((1, 3, 32, 32), rng=random.Random(lowercase_ ) ).to(lowercase_ )
a__ =floats_tensor((1, 3, 32, 32), rng=random.Random(lowercase_ ) ).to(lowercase_ )
a__ ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
def _UpperCAmelCase ( self ) -> List[str]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _UpperCAmelCase ( self ) -> int:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''', reason='''float16 requires CUDA''' )
def _UpperCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _UpperCAmelCase ( self ) -> int:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
self._test_save_load_local()
def _UpperCAmelCase ( self ) -> str:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2, )
| 188 | 1 |
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id', ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path', ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision', [None, 'v2'] )
def UpperCAmelCase ( a_, a_, a_ ):
'''simple docstring'''
lowerCamelCase : Tuple = hf_hub_url(repo_id=a_, path=a_, revision=a_ )
assert url == F"""https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(a_ )}"""
| 205 |
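The URL scheme that the test above checks can be rebuilt with only the standard library:
from urllib.parse import quote
repo_id, path, revision = "org-name/dataset-name", "filename with blanks.csv", None
url = f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
print(url)
# https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv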
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_A = logging.get_logger('transformers.models.speecht5')
def UpperCAmelCase ( a_, a_, a_ ):
'''simple docstring'''
hf_model.apply_weight_norm()
lowerCamelCase : str = checkpoint['input_conv.weight_g']
lowerCamelCase : int = checkpoint['input_conv.weight_v']
lowerCamelCase : Optional[Any] = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
lowerCamelCase : Tuple = checkpoint[F"""upsamples.{i}.1.weight_g"""]
lowerCamelCase : Any = checkpoint[F"""upsamples.{i}.1.weight_v"""]
lowerCamelCase : List[str] = checkpoint[F"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
lowerCamelCase : Optional[Any] = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_g"""]
lowerCamelCase : Tuple = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_v"""]
lowerCamelCase : Union[str, Any] = checkpoint[F"""blocks.{i}.convs1.{j}.1.bias"""]
lowerCamelCase : Dict = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_g"""]
lowerCamelCase : str = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_v"""]
lowerCamelCase : Optional[Any] = checkpoint[F"""blocks.{i}.convs2.{j}.1.bias"""]
lowerCamelCase : Any = checkpoint['output_conv.1.weight_g']
lowerCamelCase : Tuple = checkpoint['output_conv.1.weight_v']
lowerCamelCase : int = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def UpperCAmelCase ( a_, a_, a_, a_=None, a_=None, ):
'''simple docstring'''
if config_path is not None:
lowerCamelCase : str = SpeechTaHifiGanConfig.from_pretrained(a_ )
else:
lowerCamelCase : Dict = SpeechTaHifiGanConfig()
lowerCamelCase : int = SpeechTaHifiGan(a_ )
lowerCamelCase : Optional[Any] = torch.load(a_ )
load_weights(orig_checkpoint['model']['generator'], a_, a_ )
lowerCamelCase : Tuple = np.load(a_ )
lowerCamelCase : str = stats[0].reshape(-1 )
lowerCamelCase : Optional[int] = stats[1].reshape(-1 )
lowerCamelCase : Dict = torch.from_numpy(a_ ).float()
lowerCamelCase : Optional[int] = torch.from_numpy(a_ ).float()
model.save_pretrained(a_ )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(a_ )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
_A = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 205 | 1 |
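The checkpoint handled above stores weight-normalized convolutions as (weight_g, weight_v) pairs, i.e. w = g * v / ||v||. That is why the conversion calls apply_weight_norm() first (so those parameter names exist to assign into) and remove_weight_norm() afterwards (to fold them back into a single weight). The reparameterization in miniature:
import torch
conv = torch.nn.Conv1d(2, 2, kernel_size=3)
conv = torch.nn.utils.weight_norm(conv)
print(sorted(name for name, _ in conv.named_parameters()))
# ['bias', 'weight_g', 'weight_v']
torch.nn.utils.remove_weight_norm(conv)
print(sorted(name for name, _ in conv.named_parameters()))
# ['bias', 'weight']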
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowercase__ ( unittest.TestCase ):
@property
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
_UpperCamelCase : Any = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
return model
@property
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_UpperCamelCase : Union[str, Any] = VQModel(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=3 ,)
return model
@property
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_UpperCamelCase : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(lowerCamelCase__ )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : str = self.dummy_uncond_unet
_UpperCamelCase : Optional[int] = DDIMScheduler()
_UpperCamelCase : str = self.dummy_vq_model
_UpperCamelCase : Any = LDMPipeline(unet=lowerCamelCase__ ,vqvae=lowerCamelCase__ ,scheduler=lowerCamelCase__ )
ldm.to(lowerCamelCase__ )
ldm.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCamelCase : int = torch.manual_seed(0 )
_UpperCamelCase : List[str] = ldm(generator=lowerCamelCase__ ,num_inference_steps=2 ,output_type='numpy' ).images
_UpperCamelCase : Any = torch.manual_seed(0 )
_UpperCamelCase : Any = ldm(generator=lowerCamelCase__ ,num_inference_steps=2 ,output_type='numpy' ,return_dict=lowerCamelCase__ )[0]
_UpperCamelCase : Tuple = image[0, -3:, -3:, -1]
_UpperCamelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase : int = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
_UpperCamelCase : Union[str, Any] = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class lowercase__ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : List[str] = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
ldm.to(lowerCamelCase__ )
ldm.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCamelCase : Union[str, Any] = torch.manual_seed(0 )
_UpperCamelCase : Optional[int] = ldm(generator=lowerCamelCase__ ,num_inference_steps=5 ,output_type='numpy' ).images
_UpperCamelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_UpperCamelCase : Any = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
_UpperCamelCase : List[Any] = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 83 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
SCREAMING_SNAKE_CASE_ = importlib.util.find_spec("""s3fs""") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
SCREAMING_SNAKE_CASE_ = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
if "://" in dataset_path:
SCREAMING_SNAKE_CASE = dataset_path.split("""://""" )[1]
return dataset_path
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> bool:
'''simple docstring'''
if fs is not None and fs.protocol != "file":
return True
else:
return False
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = not is_remote_filesystem(_SCREAMING_SNAKE_CASE )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(_SCREAMING_SNAKE_CASE ) , fs._strip_protocol(_SCREAMING_SNAKE_CASE ) )
else:
fs.mv(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , recursive=_SCREAMING_SNAKE_CASE )
def __lowercase ( ) -> None:
'''simple docstring'''
if hasattr(fsspec.asyn , """reset_lock""" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = threading.Lock()
| 296 | 0 |
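The path handling above in isolation: a URI with a protocol prefix has the prefix stripped, and anything whose filesystem protocol is not "file" counts as remote.
def extract_path_from_uri(dataset_path: str) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path
print(extract_path_from_uri("s3://bucket/dataset"))  # bucket/dataset
print(extract_path_from_uri("local/dataset"))        # local/dataset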
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "" , ) -> bool:
    return sum(c % 2 for c in Counter(input_str.replace(''' ''' , '''''' ).lower() ).values() ) < 2
def can_string_be_rearranged_as_palindrome(input_str: str = "" ) -> bool:
    if len(input_str ) == 0:
        return True
    lower_case_input_str = input_str.replace(''' ''' , '''''' ).lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character , 0 ) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True
def benchmark(check_str: str = "" ) -> None:
    print('''\nFor string = ''' , check_str , ''':''' )
    print(
        '''> can_string_be_rearranged_as_palindrome_counter()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome_counter(check_str ) , '''\ttime =''' , timeit(
            '''z.can_string_be_rearranged_as_palindrome_counter(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
    print(
        '''> can_string_be_rearranged_as_palindrome()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome(check_str ) , '''\ttime =''' , timeit(
            '''z.can_string_be_rearranged_as_palindrome(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = input(
"Enter string to determine if it can be rearranged as a palindrome or not: "
).strip()
benchmark(check_str)
_lowerCamelCase : Any = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"""{check_str} can {"" if status else "not "}be rearranged as a palindrome""")
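# Illustrative examples (not in the original file); both implementations agree:
#     can_string_be_rearranged_as_palindrome_counter("Momo")  -> True   (rearranges to "moom")
#     can_string_be_rearranged_as_palindrome("abcb")          -> False  (two characters with odd counts)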
| 359 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Create a random int32 tensor of the given shape with values in [0, vocab_size)."""
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))
    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
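# Shape sketch (illustrative): ids_tensor((2, 5), vocab_size=99) returns an int32
# array of shape (2, 5) with entries in [0, 99); random_attention_mask((2, 5))
# additionally forces the last position of every row to 1 so no batch row is fully masked.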
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def test_greedy_generate_pt_fx(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0
        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)
            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)
            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
    def test_greedy_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_sample_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)
    def test_sample_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = False
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = True
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.num_beams = 2
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
| 99 | 0 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 12 |
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, eos_token="</s>", unk_token="<unk>",
                 pad_token="<pad>", extra_ids=100, additional_special_tokens=None, **kwargs):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, eos_token=eos_token, unk_token=unk_token,
            pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )
        return max_model_length
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")
        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
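# Usage sketch (illustrative; assumes the "t5-small" checkpoint is available):
#     >>> tok = T5TokenizerFast.from_pretrained("t5-small")
#     >>> tok.build_inputs_with_special_tokens([10, 11], [20, 21])
#     [10, 11, 1, 20, 21, 1]   # 1 is T5's </s> id; prefix_tokens is empty
#     >>> len(tok.get_sentinel_tokens())   # default extra_ids=100
#     100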
| 254 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 356 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")
        if "audio" and "qkv" in key:  # NOTE: "audio" is always truthy, so this reduces to '"qkv" in key'
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
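# The qkv split in rename_state_dict relies on the packed [q; k; v] layout along
# dim 0 (illustrative sketch with a dummy tensor):
#     mixed = torch.arange(6.0)                 # pretend q/k/v each have dim 2
#     dim = mixed.size(0) // 3
#     mixed[:dim], mixed[dim : 2 * dim], mixed[2 * dim :]
#     -> (tensor([0., 1.]), tensor([2., 3.]), tensor([4., 5.]))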
| 82 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 300 |
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg"
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()
    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False
    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()
    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]
    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)
    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]
    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val
    # load state dict
    model.load_state_dict(state_dict)
    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)
    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values
    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits
    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )
    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
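# Example invocation (illustrative; the script filename below is a placeholder):
#     python convert_trocr_checkpoint.py \
#         --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#         --pytorch_dump_folder_path ./trocr-base-handwritten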
| 300 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)
    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image,
        )
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image, return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
@unittest.skipIf(torch_device != "cuda", "This test requires a GPU" )
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(torch_device)
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt], generator=generator, num_inference_steps=2, output_type="np", image=init_image,
        ).images
        assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda", "This test requires a GPU" )
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))
        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type="np",
        )
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )
        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type="np",
        )
        image = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 368 |
'''simple docstring'''
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
    """Given two of (electron_conc, hole_conc, intrinsic_conc), return the name and value of the missing one via the mass-action law n * p = n_i**2."""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
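# Worked example (mass-action law n * p = n_i**2, all names from above): with
# electron_conc=0, hole_conc=1600 and intrinsic_conc=200, the missing value is
# 200**2 / 1600 = 25.0, so carrier_concentration(0, 1600, 200) returns
# ("electron_conc", 25.0).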
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 31 | 0 |
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 13 |
'''simple docstring'''
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using a rolling hash."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
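# Rolling-hash intuition (illustrative): sliding "abc" -> "bcd" with base B and
# modulus M updates the hash as
#     hash("bcd") = ((hash("abc") - ord("a") * B**2) * B + ord("d")) % M
# which is exactly the update above, with B**(p_len - 1) % M kept in `modulus_power`.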
def test_rabin_karp() -> None:
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
| 223 | 0 |
from collections.abc import Callable
import numpy as np
def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve an ODE dy/dx = ode_func(x, y) with the forward (explicit) Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
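# Example (illustrative): forward Euler for y' = y, y(0) = 1 on [0, 1] with h = 0.01:
#     ys = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
#     round(ys[-1], 3)  # 1.01**100 ~ 2.705, an underestimate of e ~ 2.718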
| 360 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 103 | 0 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model
def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec
def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))
def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : Dict=True , _lowerCamelCase : Dict=True):
lowercase__ : str = instantiate_from_config(_lowerCamelCase)
if sd is not None:
model.load_state_dict(_lowerCamelCase)
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Any):
# load the specified checkpoint
if ckpt:
lowercase__ : int = torch.load(_lowerCamelCase , map_location="cpu")
lowercase__ : str = pl_sd["global_step"]
print(f'''loaded model from global step {global_step}.''')
else:
lowercase__ : Tuple = {"state_dict": None}
lowercase__ : Optional[Any] = None
lowercase__ : List[Any] = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=_lowerCamelCase , eval_mode=_lowerCamelCase)["model"]
return model, global_step
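# A minimal usage sketch, assuming the default checkpoint paths above exist;
# `device` and `image` are illustrative names, not part of this module.
#
#     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#     vqgan = load_vqgan(device)  # uses the default yaml/pt paths
#     image = torch.randn(1, 3, 256, 256, device=device)  # stand-in for a real image batch
#     reconstruction = reconstruct_with_vqgan(image, vqgan)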
| 87 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
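# A small illustrative example of composing the decorators above; this test
# class is only a sketch (the name `_ExampleDecoratedTests` is hypothetical)
# and exercises nothing real.
@for_all_test_methods(slow, require_faiss)
class _ExampleDecoratedTests(unittest.TestCase):
    def test_noop(self):
        # runs only when RUN_SLOW=1 and faiss is installed
        self.assertTrue(True)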
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    """Return the numerical id of the current `pytest-xdist` worker, or 0 when not running under xdist."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """Return a port unique to this xdist worker, suitable for `torch.distributed`'s `--master_port`."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
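# A minimal usage sketch (illustrative): run a python one-liner and inspect the
# captured streams; `sys.executable` guarantees the same interpreter is used.
#
#     result = execute_subprocess_async([sys.executable, "-c", "print('hello')"])
#     assert result.returncode == 0
#     assert "hello" in result.stdout[0]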
| 87 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
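# A minimal sketch (not part of accelerate) of what each `*_command_parser`
# helper above is expected to do: register a sub-parser and set a `func`
# default on it, which is what makes the `args.func(args)` dispatch work.
# `demo_command_parser` / `demo_command` are hypothetical names.
#
#     def demo_command(args):
#         print(f"Hello, {args.name}!")
#
#     def demo_command_parser(subparsers):
#         parser = subparsers.add_parser("demo", help="run a demo command")
#         parser.add_argument("--name", default="world")
#         parser.set_defaults(func=demo_command)
#         return parser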
| 170 |
import argparse
import pickle

import numpy as np
import torch
from torch import nn

from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging


logging.set_verbosity_info()


def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
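# Example invocation (the paths below are placeholders):
#
#     python convert_reformer_trax_checkpoint_to_pytorch.py \
#         --trax_model_pkl_path ./model.pkl \
#         --config_file ./config.json \
#         --pytorch_dump_path ./pytorch_model.bin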
| 170 | 1 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union

import tqdm
from filelock import FileLock

from transformers import (
    BartTokenizer,
    BartTokenizerFast,
    DataProcessor,
    PreTrainedTokenizer,
    RobertaTokenizer,
    RobertaTokenizerFast,
    XLMRobertaTokenizer,
    is_tf_available,
    is_torch_available,
)


logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
    import torch
    from torch.utils.data import Dataset

    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
    import tensorflow as tf

    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10_000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10_000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
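# A minimal usage sketch, assuming a local HANS checkout at `data_dir`; the
# paths, checkpoint name, and max length below are illustrative only.
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     eval_dataset = HansDataset(
#         data_dir="./hans", tokenizer=tokenizer, task="hans",
#         max_seq_length=128, evaluate=True,
#     )
#     print(len(eval_dataset), eval_dataset.get_labels())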
| 75 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class WavaVecaProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16_000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
"""simple docstring"""
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowerCAmelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor, lowerCAmelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set, decoder.model_container[decoder._model_key]._unigram_set, )
self.assertIsInstance(processor.decoder, lowerCAmelCase )
    def test_save_load_pretrained_additional_features(self):
"""simple docstring"""
lowerCamelCase_ =WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha, 5.0 )
self.assertEqual(processor.language_model.beta, 3.0 )
self.assertEqual(processor.language_model.score_boundary, -7.0 )
self.assertEqual(processor.language_model.unk_score_offset, 3 )
    def test_load_decoder_tokenizer_mismatch_raises(self):
"""simple docstring"""
lowerCamelCase_ =self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(lowerCAmelCase, '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=lowerCAmelCase, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() )
    def test_feature_extractor(self):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
lowerCamelCase_ =floats_list((3, 1_000) )
lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' )
lowerCamelCase_ =processor(lowerCAmelCase, return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )
    def test_tokenizer(self):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
lowerCamelCase_ ='''This is a test string'''
lowerCamelCase_ =processor(text=lowerCAmelCase )
lowerCamelCase_ =tokenizer(lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
    def test_decoder(self):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
lowerCamelCase_ =self._get_dummy_logits(shape=(10, 16), seed=13 )
lowerCamelCase_ =processor.decode(lowerCAmelCase )
lowerCamelCase_ =decoder.decode_beams(lowerCAmelCase )[0]
self.assertEqual(decoded_decoder[0], decoded_processor.text )
self.assertEqual('''</s> <s> </s>''', decoded_processor.text )
self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
    def test_decoder_batch(self, pool_context):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
lowerCamelCase_ =self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
lowerCamelCase_ =processor.batch_decode(lowerCAmelCase )
else:
with get_context(lowerCAmelCase ).Pool() as pool:
lowerCamelCase_ =processor.batch_decode(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =list(lowerCAmelCase )
with get_context('''fork''' ).Pool() as p:
lowerCamelCase_ =decoder.decode_beams_batch(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =[], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(lowerCAmelCase, decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''], decoded_processor.text )
self.assertListEqual(lowerCAmelCase, decoded_processor.logit_score )
self.assertListEqual(lowerCAmelCase, decoded_processor.lm_score )
    def test_decoder_with_params(self):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
lowerCamelCase_ =self._get_dummy_logits()
lowerCamelCase_ =15
lowerCamelCase_ =-2_0.0
lowerCamelCase_ =-4.0
lowerCamelCase_ =processor.batch_decode(
lowerCAmelCase, beam_width=lowerCAmelCase, beam_prune_logp=lowerCAmelCase, token_min_logp=lowerCAmelCase, )
lowerCamelCase_ =decoded_processor_out.text
lowerCamelCase_ =list(lowerCAmelCase )
with get_context('''fork''' ).Pool() as pool:
lowerCamelCase_ =decoder.decode_beams_batch(
lowerCAmelCase, lowerCAmelCase, beam_width=lowerCAmelCase, beam_prune_logp=lowerCAmelCase, token_min_logp=lowerCAmelCase, )
lowerCamelCase_ =[d[0][0] for d in decoded_decoder_out]
lowerCamelCase_ =[d[0][2] for d in decoded_decoder_out]
lowerCamelCase_ =[d[0][3] for d in decoded_decoder_out]
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''], lowerCAmelCase )
self.assertTrue(np.array_equal(lowerCAmelCase, decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7], lowerCAmelCase, atol=1e-3 ) )
self.assertTrue(np.array_equal(lowerCAmelCase, decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4], lowerCAmelCase, atol=1e-3 ) )
    def test_decoder_with_params_of_lm(self):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
lowerCamelCase_ =self._get_dummy_logits()
lowerCamelCase_ =2.0
lowerCamelCase_ =5.0
lowerCamelCase_ =-2_0.0
lowerCamelCase_ =True
lowerCamelCase_ =processor.batch_decode(
lowerCAmelCase, alpha=lowerCAmelCase, beta=lowerCAmelCase, unk_score_offset=lowerCAmelCase, lm_score_boundary=lowerCAmelCase, )
lowerCamelCase_ =decoded_processor_out.text
lowerCamelCase_ =list(lowerCAmelCase )
decoder.reset_params(
alpha=lowerCAmelCase, beta=lowerCAmelCase, unk_score_offset=lowerCAmelCase, lm_score_boundary=lowerCAmelCase, )
with get_context('''fork''' ).Pool() as pool:
lowerCamelCase_ =decoder.decode_beams_batch(
lowerCAmelCase, lowerCAmelCase, )
lowerCamelCase_ =[d[0][0] for d in decoded_decoder_out]
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''], lowerCAmelCase )
lowerCamelCase_ =processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha, 2.0 )
self.assertEqual(lm_model.beta, 5.0 )
self.assertEqual(lm_model.unk_score_offset, -2_0.0 )
self.assertEqual(lm_model.score_boundary, lowerCAmelCase )
    def test_decoder_download_ignores_files(self):
"""simple docstring"""
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase_ =processor.decoder.model_container[processor.decoder._model_key]
lowerCamelCase_ =Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
lowerCamelCase_ =os.listdir(lowerCAmelCase )
lowerCamelCase_ =['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
    def test_decoder_local_files(self):
"""simple docstring"""
lowerCamelCase_ =snapshot_download('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained(lowerCAmelCase )
lowerCamelCase_ =processor.decoder.model_container[processor.decoder._model_key]
lowerCamelCase_ =Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
lowerCamelCase_ =os.listdir(lowerCAmelCase )
lowerCamelCase_ =os.listdir(lowerCAmelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
    def test_processor_from_auto_processor(self):
"""simple docstring"""
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase_ =AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase_ =floats_list((3, 1_000) )
lowerCamelCase_ =processor_wavaveca(lowerCAmelCase, return_tensors='''np''' )
lowerCamelCase_ =processor_auto(lowerCAmelCase, return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1e-2 )
lowerCamelCase_ =self._get_dummy_logits()
lowerCamelCase_ =processor_wavaveca.batch_decode(lowerCAmelCase )
lowerCamelCase_ =processor_auto.batch_decode(lowerCAmelCase )
self.assertListEqual(decoded_wavaveca.text, decoded_auto.text )
    def test_model_input_names(self):
"""simple docstring"""
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_decoder()
lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
self.assertListEqual(
processor.model_input_names, feature_extractor.model_input_names, msg='''`processor` and `feature_extractor` model input names do not match''', )
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
    def test_offsets_integration_fast(self):
"""simple docstring"""
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase_ =self._get_dummy_logits()[0]
lowerCamelCase_ =processor.decode(lowerCAmelCase, output_word_offsets=lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ), 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(lowerCAmelCase, lowerCAmelCase ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''], '''word''' ) ), outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''word''' ), ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''start_offset''' ), [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''end_offset''' ), [1, 3, 5] )
    def test_offsets_integration_fast_batch(self):
"""simple docstring"""
lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase_ =self._get_dummy_logits()
lowerCamelCase_ =processor.batch_decode(lowerCAmelCase, output_word_offsets=lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ), 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(lowerCAmelCase, lowerCAmelCase ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ) for o in outputs['''word_offsets''']], outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''word''' ), ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''start_offset''' ), [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''end_offset''' ), [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
    def test_word_time_stamp_integration(self):
"""simple docstring"""
import torch
lowerCamelCase_ =load_dataset('''common_voice''', '''en''', split='''train''', streaming=lowerCAmelCase )
lowerCamelCase_ =ds.cast_column('''audio''', datasets.Audio(sampling_rate=16_000 ) )
lowerCamelCase_ =iter(lowerCAmelCase )
lowerCamelCase_ =next(lowerCAmelCase )
lowerCamelCase_ =AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
lowerCamelCase_ =WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
lowerCamelCase_ =processor(sample['''audio''']['''array'''], return_tensors='''pt''' ).input_values
with torch.no_grad():
lowerCamelCase_ =model(lowerCAmelCase ).logits.cpu().numpy()
lowerCamelCase_ =processor.decode(logits[0], output_word_offsets=lowerCAmelCase )
lowerCamelCase_ =model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
lowerCamelCase_ =[
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
lowerCamelCase_ ='''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ), lowerCAmelCase )
self.assertEqual(''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ), output.text )
# output times
lowerCamelCase_ =torch.tensor(self.get_from_offsets(lowerCAmelCase, '''start_time''' ) )
lowerCamelCase_ =torch.tensor(self.get_from_offsets(lowerCAmelCase, '''end_time''' ) )
# fmt: off
lowerCamelCase_ =torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
lowerCamelCase_ =torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=0.0_1 ) )
self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=0.0_1 ) )
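# A minimal usage sketch of the processor under test, assuming the hub
# checkpoint used above is reachable; the random logits are a stand-in for
# real CTC output. As the comment in `test_decoder_batch` notes, the pool is
# instantiated *after* the processor so the LM is visible to the workers.
#
#     from multiprocessing import get_context
#
#     processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
#     logits = np.random.rand(2, 10, 16)  # stand-in for real CTC logits
#     with get_context("fork").Pool() as pool:
#         transcriptions = processor.batch_decode(logits, pool).text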
| 75 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
__A = os.path.join(self.tmpdirname ,"dpr_tokenizer" )
os.makedirs(_snake_case ,exist_ok=_snake_case )
__A = os.path.join(_snake_case ,DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
__A = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
    @slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
    @slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
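# A minimal usage sketch: the composed tokenizer exposes the two sub-tokenizers
# exercised above. The checkpoint name matches the tests; the strings are
# illustrative only.
#
#     tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#     question_inputs = tokenizer.question_encoder(["who wrote hamlet"])
#     target_inputs = tokenizer.generator(["william shakespeare"])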
| 351 |
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    """
    Print the first- and second-order Shannon entropies of `text`
    (in bits per symbol), followed by the difference between them.
    """
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)

    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Count single-character and two-character substrings of `text`.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main():
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
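# A quick worked example (illustrative, not part of the original script): for
# the string "aab", analyze_text counts singles {'a': 2, 'b': 1} and pairs
# {' a': 1, 'aa': 1, 'ab': 1}, so the first-order entropy is
# -(2/3 * log2(2/3) + 1/3 * log2(1/3)) ≈ 0.92 bits per character.
#
#     >>> single, pairs = analyze_text("aab")
#     >>> sorted(single.items())
#     [('a', 2), ('b', 1)]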
| 124 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''bert-base-uncased''': 5_1_2,
'''bert-large-uncased''': 5_1_2,
'''bert-base-cased''': 5_1_2,
'''bert-large-cased''': 5_1_2,
'''bert-base-multilingual-uncased''': 5_1_2,
'''bert-base-multilingual-cased''': 5_1_2,
'''bert-base-chinese''': 5_1_2,
'''bert-base-german-cased''': 5_1_2,
'''bert-large-uncased-whole-word-masking''': 5_1_2,
'''bert-large-cased-whole-word-masking''': 5_1_2,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 5_1_2,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 5_1_2,
'''bert-base-cased-finetuned-mrpc''': 5_1_2,
'''bert-base-german-dbmdz-cased''': 5_1_2,
'''bert-base-german-dbmdz-uncased''': 5_1_2,
'''TurkuNLP/bert-base-finnish-cased-v1''': 5_1_2,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 5_1_2,
'''wietsedv/bert-base-dutch-cased''': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" BERT tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add [CLS] and [SEP] around one sequence, or around and between a pair of sequences."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Token type IDs for a BERT sequence pair: 0s over the first segment, 1s over the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
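A short usage sketch for the class above. The checkpoint is one of the entries in the maps earlier in the file; loading it requires network access:

tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
enc = tokenizer("hello world", "second segment")
print(enc["input_ids"])       # [CLS] ids ... [SEP] ids ... [SEP]
print(enc["token_type_ids"])  # 0s over the first segment, 1s over the second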
| 224 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save the length (or max(src, tgt) length) of each example so the dataset can batch dynamically."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
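Since the module hands control to fire.Fire below, the function doubles as a CLI entry point; an equivalent Python-level call looks like this (tokenizer name and data directory are placeholders):

save_len_file("t5-small", "/path/to/seq2seq/data", max_source_length=1024, consider_target=False)
# writes the pickled length lists to the datasets' len_file paths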
if __name__ == "__main__":
fire.Fire(save_len_file) | 206 | 0 |
import inspect

import jax
import jax.lax as lax
import jax.numpy as jnp

from ..utils import add_start_docstrings
from ..utils.logging import get_logger


logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary token when not
            using beam search, or log softmax for each vocabulary token when using beam search.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.

    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""


class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers applied during multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsProcessorList(list):
    """A list of processors/warpers that applies each of them to the scores in turn."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores, cur_len, **kwargs):
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores


class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    """Warper that rescales the logits distribution by a temperature."""

    def __init__(self, temperature):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids, scores, cur_len):
        scores = scores / self.temperature
        return scores


class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """Warper that keeps the smallest set of tokens whose cumulative probability exceeds `top_p`."""

    def __init__(self, top_p, filter_value=-float("Inf"), min_tokens_to_keep=1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids, scores, cur_len):
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores


class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """Warper that keeps only the `top_k` highest-probability tokens."""

    def __init__(self, top_k, filter_value=-float("Inf"), min_tokens_to_keep=1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids, scores, cur_len):
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores


class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """Forces the specified token as the first generated token."""

    def __init__(self, bos_token_id):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids, scores, cur_len):
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """Forces the specified token as the last generated token when `max_length` is reached."""

    def __init__(self, max_length, eos_token_id):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids, scores, cur_len):
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores


class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    """Enforces a minimum length by suppressing the EOS token until `min_length` is reached."""

    def __init__(self, min_length, eos_token_id):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids, scores, cur_len):
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    """Suppresses a list of tokens as soon as generation starts, i.e. at `begin_index`."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    """Suppresses a list of tokens at every generation step."""

    def __init__(self, suppress_tokens):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids, scores, cur_len):
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores


class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    """Forces specific tokens at specific generation indices, as given by `force_token_map`."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids, scores, cur_len):
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores


class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    """Constrains Whisper generation so that timestamp tokens appear in well-formed pairs."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
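A minimal sketch of how the pieces above compose during sampling; the prompt and logits are toy values:

import jax.numpy as jnp

processors = FlaxLogitsProcessorList(
    [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=5)]
)
input_ids = jnp.zeros((1, 4), dtype=jnp.int32)  # toy prompt of length 4
scores = jnp.ones((1, 32))                      # toy logits over a 32-token vocabulary
scores = processors(input_ids, scores, cur_len=4)
print(scores.shape)  # (1, 32); everything outside the top 5 is now the filter value (-inf)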
| 367 |
import warnings
from contextlib import contextmanager

from ....processing_utils import ProcessorMixin


class MCTCTProcessor(ProcessorMixin):
    """Wraps an MCTCT feature extractor and a tokenizer into a single processor."""

    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
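A hedged usage sketch for the processor above. The checkpoint id is the one referenced in the M-CTC-T documentation, the waveform is a stand-in, and the architecture lives under `deprecated`, so treat this purely as an illustration:

import numpy as np

processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence as a stand-in
batch = processor(audio=waveform, sampling_rate=16_000, text="hello")
# `batch` carries the acoustic features plus a "labels" entry with the tokenized text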
| 185 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # Accept the historical misspelling of the kwarg as an alias for backward compatibility.
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
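The validation hook above is easiest to see in action:

config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
print(config.rope_scaling)  # {'type': 'linear', 'factor': 2.0}

try:
    OpenLlamaConfig(rope_scaling={"type": "cubic", "factor": 2.0})
except ValueError as err:
    print(err)  # the name field must be one of ['linear', 'dynamic']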
| 205 |
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into its indented blocks, starting at `indent_level`."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wraps a key function (mapping an object to a string) to lower case and remove underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort. `key` optionally maps an object to a str."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Return the same `import_statement` but with objects properly sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """Sort `_import_structure` imports in `file`; `check_only` determines if we only check or overwrite."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` under the transformers source tree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
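The three-bucket ordering implemented by sort_objects (constants, then classes, then functions, each alphabetized while ignoring underscores) is easiest to see on a toy list:

names = ["load_tool", "Agent", "OPENAI_API_KEY", "launch_gradio_demo", "AzureOpenAiAgent"]
print(sort_objects(names))
# ['OPENAI_API_KEY', 'Agent', 'AzureOpenAiAgent', 'launch_gradio_demo', 'load_tool']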
| 205 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = KandinskyVaaPriorPipeline
SCREAMING_SNAKE_CASE_ : List[Any] = ['prompt']
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ['prompt', 'negative_prompt']
SCREAMING_SNAKE_CASE_ : Tuple = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
SCREAMING_SNAKE_CASE_ : Optional[int] = False
@property
def lowerCamelCase__ ( self ):
return 32
@property
def lowerCamelCase__ ( self ):
return 32
@property
def lowerCamelCase__ ( self ):
return self.time_input_dim
@property
def lowerCamelCase__ ( self ):
return self.time_input_dim * 4
@property
def lowerCamelCase__ ( self ):
return 1_00
@property
def lowerCamelCase__ ( self ):
_lowercase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def lowerCamelCase__ ( self ):
torch.manual_seed(0 )
_lowercase : Tuple = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,)
return CLIPTextModelWithProjection(_SCREAMING_SNAKE_CASE )
@property
def lowerCamelCase__ ( self ):
torch.manual_seed(0 )
_lowercase : Union[str, Any] = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 12,
"""embedding_dim""": self.text_embedder_hidden_size,
"""num_layers""": 1,
}
_lowercase : Any = PriorTransformer(**_SCREAMING_SNAKE_CASE )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
_lowercase : List[Any] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def lowerCamelCase__ ( self ):
torch.manual_seed(0 )
_lowercase : str = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size ,image_size=2_24 ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_channels=3 ,num_hidden_layers=5 ,patch_size=14 ,)
_lowercase : List[Any] = CLIPVisionModelWithProjection(_SCREAMING_SNAKE_CASE )
return model
@property
def lowerCamelCase__ ( self ):
_lowercase : str = CLIPImageProcessor(
crop_size=2_24 ,do_center_crop=_SCREAMING_SNAKE_CASE ,do_normalize=_SCREAMING_SNAKE_CASE ,do_resize=_SCREAMING_SNAKE_CASE ,image_mean=[0.48145466, 0.4578275, 0.40821073] ,image_std=[0.26862954, 0.26130258, 0.27577711] ,resample=3 ,size=2_24 ,)
return image_processor
def lowerCamelCase__ ( self ):
_lowercase : Optional[int] = self.dummy_prior
_lowercase : Optional[Any] = self.dummy_image_encoder
_lowercase : Union[str, Any] = self.dummy_text_encoder
_lowercase : Tuple = self.dummy_tokenizer
_lowercase : str = self.dummy_image_processor
_lowercase : int = UnCLIPScheduler(
variance_type="""fixed_small_log""" ,prediction_type="""sample""" ,num_train_timesteps=10_00 ,clip_sample=_SCREAMING_SNAKE_CASE ,clip_sample_range=10.0 ,)
_lowercase : List[str] = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""scheduler""": scheduler,
"""image_processor""": image_processor,
}
return components
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_=0 ):
if str(_SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
_lowercase : List[Any] = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
_lowercase : str = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
_lowercase : Optional[Any] = {
"""prompt""": """horse""",
"""generator""": generator,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def lowerCamelCase__ ( self ):
_lowercase : int = """cpu"""
_lowercase : List[Any] = self.get_dummy_components()
_lowercase : Dict = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
_lowercase : Any = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_lowercase : str = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) )
_lowercase : List[str] = output.image_embeds
_lowercase : Dict = pipe(
**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) ,return_dict=_SCREAMING_SNAKE_CASE ,)[0]
_lowercase : Optional[int] = image[0, -10:]
_lowercase : Optional[int] = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
_lowercase : Optional[int] = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def lowerCamelCase__ ( self ):
_lowercase : List[str] = torch_device == """cpu"""
_lowercase : Optional[Any] = True
_lowercase : int = False
self._test_inference_batch_single_identical(
test_max_difference=_SCREAMING_SNAKE_CASE ,relax_max_difference=_SCREAMING_SNAKE_CASE ,test_mean_pixel_difference=_SCREAMING_SNAKE_CASE ,)
@skip_mps
def lowerCamelCase__ ( self ):
_lowercase : str = torch_device == """cpu"""
_lowercase : Optional[int] = False
self._test_attention_slicing_forward_pass(
test_max_difference=_SCREAMING_SNAKE_CASE ,test_mean_pixel_difference=_SCREAMING_SNAKE_CASE ,)
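Outside the test harness, the prior pipeline under test is typically driven as below. This is a hedged sketch: the checkpoint id follows the Kandinsky 2.2 community release, and downloading the weights requires network access:

import torch

pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
)
out = pipe_prior("red cat, 4k photo", guidance_scale=4.0, num_inference_steps=25)
image_embeds = out.image_embeds                  # fed to the decoder pipeline
negative_image_embeds = out.negative_image_embeds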
| 368 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = XLNetTokenizer
SCREAMING_SNAKE_CASE_ : Dict = XLNetTokenizerFast
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = """<s>"""
_lowercase : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) ,UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""<unk>""" )
self.assertEqual(vocab_keys[1] ,"""<s>""" )
self.assertEqual(vocab_keys[-1] ,"""<eod>""" )
self.assertEqual(len(UpperCAmelCase_ ) ,10_06 )
def lowerCamelCase__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size ,10_00 )
def lowerCamelCase__ ( self ):
_lowercase : int = XLNetTokenizer(UpperCAmelCase_ ,keep_accents=UpperCAmelCase_ )
_lowercase : Union[str, Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(UpperCAmelCase_ ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) ,[2_85, 46, 10, 1_70, 3_82] )
_lowercase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
_lowercase : List[Any] = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ ,[8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
_lowercase : Optional[Any] = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
def lowerCamelCase__ ( self ):
_lowercase : Dict = XLNetTokenizer(UpperCAmelCase_ ,do_lower_case=UpperCAmelCase_ )
_lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""▁he""", """ll""", """o"""] )
def lowerCamelCase__ ( self ):
_lowercase : int = XLNetTokenizer(UpperCAmelCase_ ,do_lower_case=UpperCAmelCase_ )
_lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
@slow
def lowerCamelCase__ ( self ):
_lowercase : Dict = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" )
_lowercase : int = tokenizer.encode("""sequence builders""" ,add_special_tokens=UpperCAmelCase_ )
_lowercase : List[str] = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=UpperCAmelCase_ )
_lowercase : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
_lowercase : Dict = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ,UpperCAmelCase_ )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def lowerCamelCase__ ( self ):
# fmt: off
_lowercase : Union[str, Any] = {"""input_ids""": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ ,model_name="""xlnet-base-cased""" ,revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" ,)
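The sequence-builder test above pins down XLNet's layout: unlike BERT, the special tokens are appended, with `<sep>` = 4 and `<cls>` = 3. A toy restatement of the asserted shapes (token ids are placeholders):

text = [17, 21442, 270]      # placeholder ids for "sequence builders"
text_2 = [10, 14645]         # placeholder ids for "multi-sequence build"
single = text + [4, 3]                  # ... <sep> <cls>
pair = text + [4] + text_2 + [4, 3]     # ... <sep> ... <sep> <cls>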
| 336 | 0 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)
    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )
        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)

            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)
# Tests require submodules as they have parent imports
with open(F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , 'w' ):
pass
shutil.move(
F'''{directory}/__init__.py''' , F'''{model_dir}/__init__.py''' , )
shutil.move(
F'''{directory}/configuration_{lowercase_model_name}.py''' , F'''{model_dir}/configuration_{lowercase_model_name}.py''' , )
        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)
if output_pytorch:
if not self._testing:
remove_copy_lines(F'''{directory}/modeling_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/modeling_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/test_modeling_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , )
else:
os.remove(F'''{directory}/modeling_{lowercase_model_name}.py''' )
os.remove(F'''{directory}/test_modeling_{lowercase_model_name}.py''' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/modeling_tf_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , )
else:
os.remove(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
os.remove(F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' )
if output_flax:
if not self._testing:
remove_copy_lines(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/modeling_flax_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , )
else:
os.remove(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
os.remove(F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/{lowercase_model_name}.md''' , F'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , )
shutil.move(
F'''{directory}/tokenization_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(A_ , A_ , A_ ):
# Create temp file
UpperCamelCase , UpperCamelCase = mkstemp()  # mkstemp returns a (file descriptor, path) pair
UpperCamelCase = False
with fdopen(A_ , 'w' ) as new_file:
with open(A_ ) as old_file:
for line in old_file:
new_file.write(A_ )
if line_to_copy_below in line:
UpperCamelCase = True
for line_to_copy in lines_to_copy:
new_file.write(A_ )
if not line_found:
raise ValueError(F'''Line {line_to_copy_below} was not found in file.''' )
# Copy the file permissions from the old file to the new file
copymode(A_ , A_ )
# Remove original file
remove(A_ )
# Move new file
move(A_ , A_ )
def skip_units(A_ ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(A_ ):
with open(A_ ) as datafile:
UpperCamelCase = []
UpperCamelCase = False
UpperCamelCase = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
UpperCamelCase = line.split('"' )[1]
UpperCamelCase = skip_units(A_ )
elif "# Below: " in line and "##" not in line:
UpperCamelCase = line.split('"' )[1]
UpperCamelCase = skip_units(A_ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(A_ , A_ , A_ )
UpperCamelCase = []
elif "# Replace with" in line and "##" not in line:
UpperCamelCase = []
elif "##" not in line:
lines_to_copy.append(A_ )
remove(A_ )
replace_in_files(F'''{directory}/to_replace_{lowercase_model_name}.py''' )
os.rmdir(A_ )
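# --- Hedged standalone sketch of the marker-insertion pattern used by the nested
# `replace` helper above. The original helper is method-local and uses obfuscated
# placeholders, so this re-implements the same idea at module scope; the function
# name and marker text below are assumptions for illustration only.
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def insert_below_marker(path, marker, new_lines):
    # Write a patched copy to a temp file, then swap it into place
    fd, tmp_path = mkstemp()
    found = False
    with fdopen(fd, 'w') as new_file, open(path) as old_file:
        for line in old_file:
            new_file.write(line)
            if marker in line:
                found = True
                new_file.writelines(new_lines)
    if not found:
        raise ValueError(f'Line {marker} was not found in file.')
    copymode(path, tmp_path)  # preserve the original file permissions
    remove(path)
    move(tmp_path, path)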
| 222 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
lowercase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
def __init__( self , lowercase , lowercase) -> Optional[Any]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowercase , scheduler=lowercase)
@torch.no_grad()
def __call__( self , lowercase = 1 , lowercase = 100 , lowercase = None , lowercase = None , lowercase = True , ) -> Union[AudioPipelineOutput, Tuple]:
'''simple docstring'''
if audio_length_in_s is None:
a__ : Optional[int] = self.unet.config.sample_size / self.unet.config.sample_rate
a__ : int = audio_length_in_s * self.unet.config.sample_rate
a__ : Union[str, Any] = 2 ** len(self.unet.up_blocks)
if sample_size < 3 * down_scale_factor:
raise ValueError(
F'{audio_length_in_s} is too small. Make sure it\'s bigger than or equal to'
F' {3 * down_scale_factor / self.unet.config.sample_rate}.')
a__ : str = int(lowercase)
if sample_size % down_scale_factor != 0:
a__ : List[str] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
F'{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'
F' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'
' process.')
a__ : List[Any] = int(lowercase)
a__ : int = next(iter(self.unet.parameters())).dtype
a__ : Tuple = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowercase , lowercase) and len(lowercase) != batch_size:
raise ValueError(
F'You have passed a list of generators of length {len(lowercase)}, but requested an effective batch'
F' size of {batch_size}. Make sure the batch size matches the length of the generators.')
a__ : Optional[Any] = randn_tensor(lowercase , generator=lowercase , device=self.device , dtype=lowercase)
# set step values
self.scheduler.set_timesteps(lowercase , device=audio.device)
a__ : Union[str, Any] = self.scheduler.timesteps.to(lowercase)
for t in self.progress_bar(self.scheduler.timesteps):
# 1. predict noise model_output
a__ : Dict = self.unet(lowercase , lowercase).sample
# 2. compute previous image: x_t -> t_t-1
a__ : Any = self.scheduler.step(lowercase , lowercase , lowercase).prev_sample
a__ : str = audio.clamp(-1 , 1).float().cpu().numpy()
a__ : List[Any] = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowercase)
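# --- Hedged usage sketch for the unconditional audio pipeline above. The public
# diffusers equivalent is DanceDiffusionPipeline; the checkpoint id below is an
# assumption (any repo shipping a compatible `unet` + `scheduler` works), while the
# call signature mirrors the __call__ definition above.
import torch
from diffusers import DanceDiffusionPipeline
pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k')  # hypothetical repo id
pipe = pipe.to('cuda' if torch.cuda.is_available() else 'cpu')
output = pipe(batch_size=1, num_inference_steps=100, audio_length_in_s=4.0)
waveform = output.audios[0]  # numpy array clamped to [-1, 1], shape (channels, samples)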
| 99 | 0 |
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A__ : Optional[int] = logging.get_logger(__name__)
A__ : Optional[int] = {'vocab_file': 'spiece.model'}
A__ : Optional[Any] = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
A__ : str = {
'AI-Sweden/gpt-sw3-126m': 2_048,
'AI-Sweden/gpt-sw3-350m': 2_048,
'AI-Sweden/gpt-sw3-1.6b': 2_048,
'AI-Sweden/gpt-sw3-6.7b': 2_048,
'AI-Sweden/gpt-sw3-20b': 2_048,
}
class lowercase__ ( snake_case__ ):
_UpperCAmelCase :Optional[int] = VOCAB_FILES_NAMES
_UpperCAmelCase :int = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase :str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase :List[str] = ["input_ids", "attention_mask"]
def __init__( self : Dict , snake_case__ : Optional[int] , snake_case__ : int=False , snake_case__ : List[Any]=False , snake_case__ : Optional[int]=False , snake_case__ : Tuple=None , snake_case__ : Any=None , snake_case__ : Optional[Any]=None , snake_case__ : Optional[Any]=None , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : Optional[int] , ):
lowerCamelCase_ : int ={} if sp_model_kwargs is None else sp_model_kwargs
lowerCamelCase_ : Optional[int] =kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided; this will work for all GPTSw3 models except gpt-sw3-7b."
" If you are only testing the model, this can safely be ignored." )
lowerCamelCase_ : int ="None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
lowerCamelCase_ : List[Any] ="<|endoftext|>" if eos_token is None else eos_token
lowerCamelCase_ : Optional[Any] ="<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
lowerCamelCase_ : Union[str, Any] =unk_token if pad_token is None else pad_token
lowerCamelCase_ : List[str] =eos_token if bos_token is None else bos_token
else:
lowerCamelCase_ : Any ="<pad>" if pad_token is None else pad_token
lowerCamelCase_ : List[Any] ="<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=snake_case__ , remove_space=snake_case__ , keep_accents=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
lowerCamelCase_ : Tuple =do_lower_case
lowerCamelCase_ : List[Any] =remove_space
lowerCamelCase_ : Union[str, Any] =keep_accents
lowerCamelCase_ : str =vocab_file
lowerCamelCase_ : int =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case__ )
# Used for whitespace normalization in input texts: a set of Unicode space and
# zero-width characters (several of them render identically in this listing)
# fmt: off
lowerCamelCase_ : Dict ={" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
lowerCamelCase_ : int =re.compile(
F"""[{''.join(map(snake_case__ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]""" )
def __getstate__( self : Optional[int] ):
lowerCamelCase_ : int =self.__dict__.copy()
lowerCamelCase_ : Tuple =None
return state
def __setstate__( self : Optional[Any] , snake_case__ : Optional[Any] ):
lowerCamelCase_ : Union[str, Any] =d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase_ : Tuple ={}
lowerCamelCase_ : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def UpperCAmelCase__ ( self : Tuple ):
return len(self.sp_model )
def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : str ):
lowerCamelCase_ : List[Any] =self.non_printing_characters_re.sub("" , snake_case__ )
# Normalize whitespaces
lowerCamelCase_ : Union[str, Any] ="".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
lowerCamelCase_ : str =unicodedata.normalize("NFC" , snake_case__ )
return text
def UpperCAmelCase__ ( self : Dict , snake_case__ : str , **snake_case__ : Dict ):
lowerCamelCase_ : Any =self.preprocess_text(snake_case__ )
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : str ):
return self.sp_model.PieceToId(snake_case__ )
def UpperCAmelCase__ ( self : int , snake_case__ : int ):
return self.sp_model.IdToPiece(snake_case__ )
@staticmethod
def UpperCAmelCase__ ( snake_case__ : str ):
return out_string
def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : List[str] ):
lowerCamelCase_ : Union[str, Any] =[]
lowerCamelCase_ : Any =""
lowerCamelCase_ : List[str] =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case__ ) + token
lowerCamelCase_ : List[str] =True
lowerCamelCase_ : List[Any] =[]
else:
current_sub_tokens.append(snake_case__ )
lowerCamelCase_ : Tuple =False
out_string += self.sp_model.decode(snake_case__ )
return out_string
def UpperCAmelCase__ ( self : Any ):
lowerCamelCase_ : Tuple ={self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase__ ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
if not os.path.isdir(snake_case__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase_ : Any =os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , "wb" ) as fi:
lowerCamelCase_ : Any =self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
def UpperCAmelCase__ ( self : List[str] , snake_case__ : Union[str, List[str]] , snake_case__ : Union[str, bool] = False ):
if isinstance(snake_case__ , snake_case__ ):
lowerCamelCase_ : Optional[int] =self.preprocess_text(snake_case__ )
lowerCamelCase_ : Optional[int] =self.sp_model.encode(snake_case__ )
else:
lowerCamelCase_ : Dict =[self.preprocess_text(snake_case__ ) for t in text]
lowerCamelCase_ : List[Any] =self.sp_model.encode(snake_case__ )
if return_tensors is True or return_tensors == "pt":
lowerCamelCase_ : Optional[int] =torch.tensor(snake_case__ )
return token_ids
def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : Union[int, List[int]] ):
return self.sp_model.decode(snake_case__ )
def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : "Conversation" ):
lowerCamelCase_ : Tuple =[F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
lowerCamelCase_ : int =(
F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(snake_case__ ) + F"""{self.bos_token}Bot:"""
)
return self.encode(text=snake_case__ )
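# --- Hedged usage sketch for the GPT-SW3 tokenizer above (the repo id comes from
# the pretrained map earlier in this sample; the rest is a plain AutoTokenizer
# round trip).
from transformers import AutoTokenizer
tok = AutoTokenizer.from_pretrained('AI-Sweden/gpt-sw3-126m')
ids = tok('Träd är fina', return_tensors='pt')['input_ids']
print(tok.decode(ids[0]))  # whitespace-normalized, NFC-folded reconstruction of the input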
| 354 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ : List[Any] = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[str] = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
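# --- Hedged sketch of what the sys.modules swap above achieves: the package module
# is replaced by a proxy whose attribute access imports the heavy submodule only on
# first use, so importing the package stays cheap until e.g. LiltModel is touched.
# This hand-rolled equivalent is an assumption, not the actual _LazyModule code.
import importlib
import types
class _LazyProxy(types.ModuleType):
    def __init__(self, name, attr_to_submodule):
        super().__init__(name)
        self._attr_to_submodule = attr_to_submodule  # e.g. {'LiltModel': '.modeling_lilt'}
    def __getattr__(self, attr):
        submodule = importlib.import_module(self._attr_to_submodule[attr], package=self.__name__)
        return getattr(submodule, attr)  # resolved only when first requested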
| 209 | 0 |
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class UpperCAmelCase ( snake_case_ ):
_lowercase: Union[str, Any] = ['''image_processor''', '''tokenizer''']
_lowercase: int = '''AutoImageProcessor'''
_lowercase: Optional[int] = '''AutoTokenizer'''
def __init__( self : int , __snake_case : Tuple=None , __snake_case : Optional[int]=None , **__snake_case : Tuple ) -> List[Any]:
_lowerCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __snake_case , )
_lowerCAmelCase = kwargs.pop("""feature_extractor""" )
_lowerCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__snake_case , __snake_case )
_lowerCAmelCase = self.image_processor
_lowerCAmelCase = False
def __call__( self : Dict , *__snake_case : Optional[int] , **__snake_case : Union[str, Any] ) -> Tuple:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__snake_case , **__snake_case )
_lowerCAmelCase = kwargs.pop("""images""" , __snake_case )
_lowerCAmelCase = kwargs.pop("""text""" , __snake_case )
if len(__snake_case ) > 0:
_lowerCAmelCase = args[0]
_lowerCAmelCase = args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
_lowerCAmelCase = self.image_processor(__snake_case , *__snake_case , **__snake_case )
if text is not None:
_lowerCAmelCase = self.tokenizer(__snake_case , **__snake_case )
if text is None:
return inputs
elif images is None:
return encodings
else:
_lowerCAmelCase = encodings["""input_ids"""]
return inputs
def lowercase__ ( self : List[Any] , *__snake_case : Dict , **__snake_case : List[str] ) -> int:
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def lowercase__ ( self : int , *__snake_case : Tuple , **__snake_case : Optional[Any] ) -> Any:
return self.tokenizer.decode(*__snake_case , **__snake_case )
@contextmanager
def lowercase__ ( self : int ) -> Optional[Any]:
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""" )
_lowerCAmelCase = True
_lowerCAmelCase = self.tokenizer
yield
_lowerCAmelCase = self.image_processor
_lowerCAmelCase = False
def lowercase__ ( self : Optional[int] , __snake_case : Union[str, Any] , __snake_case : List[Any]=False , __snake_case : Dict=None ) -> Tuple:
if added_vocab is None:
_lowerCAmelCase = self.tokenizer.get_added_vocab()
_lowerCAmelCase = {}
while tokens:
_lowerCAmelCase = re.search(R"""<s_(.*?)>""" , __snake_case , re.IGNORECASE )
if start_token is None:
break
_lowerCAmelCase = start_token.group(1 )
_lowerCAmelCase = re.search(Rf"</s_{key}>" , __snake_case , re.IGNORECASE )
_lowerCAmelCase = start_token.group()
if end_token is None:
_lowerCAmelCase = tokens.replace(__snake_case , """""" )
else:
_lowerCAmelCase = end_token.group()
_lowerCAmelCase = re.escape(__snake_case )
_lowerCAmelCase = re.escape(__snake_case )
_lowerCAmelCase = re.search(f"{start_token_escaped}(.*?){end_token_escaped}" , __snake_case , re.IGNORECASE )
if content is not None:
_lowerCAmelCase = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
_lowerCAmelCase = self.tokenajson(__snake_case , is_inner_value=__snake_case , added_vocab=__snake_case )
if value:
if len(__snake_case ) == 1:
_lowerCAmelCase = value[0]
_lowerCAmelCase = value
else: # leaf nodes
_lowerCAmelCase = []
for leaf in content.split(R"""<sep/>""" ):
_lowerCAmelCase = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
_lowerCAmelCase = leaf[1:-2] # for categorical special tokens
output[key].append(__snake_case )
if len(output[key] ) == 1:
_lowerCAmelCase = output[key][0]
_lowerCAmelCase = tokens[tokens.find(__snake_case ) + len(__snake_case ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=__snake_case , added_vocab=__snake_case )
if len(__snake_case ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __snake_case , )
return self.image_processor_class
@property
def lowercase__ ( self : List[Any] ) -> Any:
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __snake_case , )
return self.image_processor
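# --- Hedged worked example of the token-to-JSON conversion above (`tokenajson` is
# the obfuscated spelling of the original method name; the tag sequences are
# made-up Donut-style illustrations, not output from a real model):
#
#   "<s_menu><s_nm>Latte</s_nm><s_cnt>2</s_cnt></s_menu>"
#       -> {"menu": {"nm": "Latte", "cnt": "2"}}
#
#   "<s_nm>Latte<sep/>Mocha</s_nm>"
#       -> {"nm": ["Latte", "Mocha"]}   # <sep/>-separated leaves become lists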
| 70 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase = AudioLDMPipeline
__lowerCamelCase = TEXT_TO_AUDIO_PARAMS
__lowerCamelCase = TEXT_TO_AUDIO_BATCH_PARAMS
__lowerCamelCase = frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def snake_case ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=_snake_case , )
_lowerCAmelCase = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
torch.manual_seed(0 )
_lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
_lowerCAmelCase = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
_lowerCAmelCase = ClapTextModelWithProjection(_snake_case )
_lowerCAmelCase = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77 )
_lowerCAmelCase = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=16000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=_snake_case , )
_lowerCAmelCase = SpeechTaHifiGan(_snake_case )
_lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""vocoder""": vocoder,
}
return components
def snake_case ( self , _snake_case , _snake_case=0 ):
"""simple docstring"""
if str(_snake_case ).startswith("""mps""" ):
_lowerCAmelCase = torch.manual_seed(_snake_case )
else:
_lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
_lowerCAmelCase = {
"""prompt""": """A hammer hitting a wooden surface""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
}
return inputs
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = audioldm_pipe(**_snake_case )
_lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 256
_lowerCAmelCase = audio[:10]
_lowerCAmelCase = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = 3 * [inputs["""prompt"""]]
# forward
_lowerCAmelCase = audioldm_pipe(**_snake_case )
_lowerCAmelCase = output.audios[0]
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = 3 * [inputs.pop("""prompt""" )]
_lowerCAmelCase = audioldm_pipe.tokenizer(
_snake_case , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_snake_case , return_tensors="""pt""" , )
_lowerCAmelCase = text_inputs["""input_ids"""].to(_snake_case )
_lowerCAmelCase = audioldm_pipe.text_encoder(
_snake_case , )
_lowerCAmelCase = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_lowerCAmelCase = F.normalize(_snake_case , dim=-1 )
_lowerCAmelCase = prompt_embeds
# forward
_lowerCAmelCase = audioldm_pipe(**_snake_case )
_lowerCAmelCase = output.audios[0]
assert np.abs(audio_1 - audio_2 ).max() < 1e-2  # the prompt-embeds path must match the plain-prompt path
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = 3 * ["""this is a negative prompt"""]
_lowerCAmelCase = negative_prompt
_lowerCAmelCase = 3 * [inputs["""prompt"""]]
# forward
_lowerCAmelCase = audioldm_pipe(**_snake_case )
_lowerCAmelCase = output.audios[0]
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = 3 * [inputs.pop("""prompt""" )]
_lowerCAmelCase = []
for p in [prompt, negative_prompt]:
_lowerCAmelCase = audioldm_pipe.tokenizer(
_snake_case , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_snake_case , return_tensors="""pt""" , )
_lowerCAmelCase = text_inputs["""input_ids"""].to(_snake_case )
_lowerCAmelCase = audioldm_pipe.text_encoder(
_snake_case , )
_lowerCAmelCase = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_lowerCAmelCase = F.normalize(_snake_case , dim=-1 )
embeds.append(_snake_case )
_lowerCAmelCase , _lowerCAmelCase = embeds
# forward
_lowerCAmelCase = audioldm_pipe(**_snake_case )
_lowerCAmelCase = output.audios[0]
assert np.abs(audio_1 - audio_2 ).max() < 1e-2  # a negative-prompt string and precomputed embeds must give the same audio
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = PNDMScheduler(skip_prk_steps=_snake_case )
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = """egg cracking"""
_lowerCAmelCase = audioldm_pipe(**_snake_case , negative_prompt=_snake_case )
_lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 256
_lowerCAmelCase = audio[:10]
_lowerCAmelCase = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = PNDMScheduler(skip_prk_steps=_snake_case )
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = """A hammer hitting a wooden surface"""
# test num_waveforms_per_prompt=1 (default)
_lowerCAmelCase = audioldm_pipe(_snake_case , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
_lowerCAmelCase = 2
_lowerCAmelCase = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
_lowerCAmelCase = 2
_lowerCAmelCase = audioldm_pipe(_snake_case , num_inference_steps=2 , num_waveforms_per_prompt=_snake_case ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
_lowerCAmelCase = 2
_lowerCAmelCase = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=_snake_case ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = audioldm_pipe.vocoder.config.sampling_rate
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = audioldm_pipe(audio_length_in_s=0.016 , **_snake_case )
_lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) / vocoder_sampling_rate == 0.016
_lowerCAmelCase = audioldm_pipe(audio_length_in_s=0.032 , **_snake_case )
_lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) / vocoder_sampling_rate == 0.032
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = ["""hey"""]
_lowerCAmelCase = audioldm_pipe(_snake_case , num_inference_steps=1 )
_lowerCAmelCase = output.audios.shape
assert audio_shape == (1, 256)
_lowerCAmelCase = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
_lowerCAmelCase = SpeechTaHifiGan(_snake_case ).to(_snake_case )
_lowerCAmelCase = audioldm_pipe(_snake_case , num_inference_steps=1 )
_lowerCAmelCase = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def snake_case ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_snake_case )
def snake_case ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=_snake_case )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def snake_case ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_snake_case )
@slow
class __lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self , _snake_case , _snake_case="cpu" , _snake_case=torch.floataa , _snake_case=0 ):
"""simple docstring"""
_lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
_lowerCAmelCase = np.random.RandomState(_snake_case ).standard_normal((1, 8, 128, 16) )
_lowerCAmelCase = torch.from_numpy(_snake_case ).to(device=_snake_case , dtype=_snake_case )
_lowerCAmelCase = {
"""prompt""": """A hammer hitting a wooden surface""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 2.5,
}
return inputs
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_inputs(_snake_case )
_lowerCAmelCase = 25
_lowerCAmelCase = audioldm_pipe(**_snake_case ).audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 81920
_lowerCAmelCase = audio[77230:77240]
_lowerCAmelCase = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
_lowerCAmelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
_lowerCAmelCase = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_inputs(_snake_case )
_lowerCAmelCase = audioldm_pipe(**_snake_case ).audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 81920
_lowerCAmelCase = audio[27780:27790]
_lowerCAmelCase = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
_lowerCAmelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
| 82 | 0 |
from collections.abc import Generator
from math import sin
def _lowercase ( UpperCamelCase_ ) -> bytes:
'''simple docstring'''
if len(UpperCamelCase_ ) != 32:
raise ValueError('Input must be of length 32' )
SCREAMING_SNAKE_CASE__ = b''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def _lowercase ( UpperCamelCase_ ) -> bytes:
'''simple docstring'''
if i < 0:
raise ValueError('Input must be non-negative' )
SCREAMING_SNAKE_CASE__ = format(UpperCamelCase_ , '08x' )[-8:]
SCREAMING_SNAKE_CASE__ = b''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
return little_endian_hex
def _lowercase ( UpperCamelCase_ ) -> bytes:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = b''
for char in message:
bit_string += format(UpperCamelCase_ , '08b' ).encode('utf-8' )
SCREAMING_SNAKE_CASE__ = format(len(UpperCamelCase_ ) , '064b' ).encode('utf-8' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(UpperCamelCase_ ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def _lowercase ( UpperCamelCase_ ) -> Generator[list[int], None, None]:
'''simple docstring'''
if len(UpperCamelCase_ ) % 512 != 0:
raise ValueError('Input must have length that\'s a multiple of 512' )
for pos in range(0 , len(UpperCamelCase_ ) , 512 ):
SCREAMING_SNAKE_CASE__ = bit_string[pos : pos + 512]
SCREAMING_SNAKE_CASE__ = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def _lowercase ( UpperCamelCase_ ) -> int:
'''simple docstring'''
if i < 0:
raise ValueError('Input must be non-negative' )
SCREAMING_SNAKE_CASE__ = format(UpperCamelCase_ , '032b' )
SCREAMING_SNAKE_CASE__ = ''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(UpperCamelCase_ , 2 )
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ ) -> int:
'''simple docstring'''
return (a + b) % 2**32
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ ) -> int:
'''simple docstring'''
if i < 0:
raise ValueError('Input must be non-negative' )
if shift < 0:
raise ValueError('Shift must be non-negative' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
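# --- Worked examples for the 32-bit left rotation above (XOR is equivalent to OR
# here because the two shifted halves never overlap after the final mod):
#   rotate(0x80000000, 1) == 0x00000001   # the high bit wraps around to bit 0
#   rotate(0x12345678, 8) == 0x34567812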
def _lowercase ( UpperCamelCase_ ) -> bytes:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = preprocess(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
SCREAMING_SNAKE_CASE__ = 0x67_45_23_01
SCREAMING_SNAKE_CASE__ = 0xEF_CD_AB_89
SCREAMING_SNAKE_CASE__ = 0x98_BA_DC_FE
SCREAMING_SNAKE_CASE__ = 0x10_32_54_76
SCREAMING_SNAKE_CASE__ = [
    # Per-round rotation amounts, four values per group of sixteen operations
    7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
    5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
    4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
    6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(UpperCamelCase_ ):
SCREAMING_SNAKE_CASE__ = aa
SCREAMING_SNAKE_CASE__ = ba
SCREAMING_SNAKE_CASE__ = ca
SCREAMING_SNAKE_CASE__ = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
SCREAMING_SNAKE_CASE__ = d ^ (b & (c ^ d))
SCREAMING_SNAKE_CASE__ = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
SCREAMING_SNAKE_CASE__ = c ^ (d & (b ^ c))
SCREAMING_SNAKE_CASE__ = (5 * i + 1) % 16
elif i <= 47:
SCREAMING_SNAKE_CASE__ = b ^ c ^ d
SCREAMING_SNAKE_CASE__ = (3 * i + 5) % 16
else:
SCREAMING_SNAKE_CASE__ = c ^ (b | not_aa(UpperCamelCase_ ))
SCREAMING_SNAKE_CASE__ = (7 * i) % 16
SCREAMING_SNAKE_CASE__ = (f + a + added_consts[i] + block_words[g]) % 2**32
SCREAMING_SNAKE_CASE__ = d
SCREAMING_SNAKE_CASE__ = c
SCREAMING_SNAKE_CASE__ = b
SCREAMING_SNAKE_CASE__ = sum_aa(UpperCamelCase_ , left_rotate_aa(UpperCamelCase_ , shift_amounts[i] ) )
# Add hashed chunk to running total
SCREAMING_SNAKE_CASE__ = sum_aa(UpperCamelCase_ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = sum_aa(UpperCamelCase_ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = sum_aa(UpperCamelCase_ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = sum_aa(UpperCamelCase_ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = reformat_hex(UpperCamelCase_ ) + reformat_hex(UpperCamelCase_ ) + reformat_hex(UpperCamelCase_ ) + reformat_hex(UpperCamelCase_ )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
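# --- Hedged verification sketch: the final routine above should agree with
# hashlib's MD5. Function names in this listing are obfuscated, so `md5_me` below
# is an assumed stand-in for the last bytes-in / hex-bytes-out function.
import hashlib
message = b'The quick brown fox jumps over the lazy dog'
assert md5_me(message) == hashlib.md5(message).hexdigest().encode('utf-8')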
| 169 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class lowercase__ ( _UpperCAmelCase , unittest.TestCase ):
A__ : Tuple =FlaxAutoencoderKL
@property
def A_ ( self : Any ):
SCREAMING_SNAKE_CASE__ = 4
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = (32, 32)
SCREAMING_SNAKE_CASE__ = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ = jax.random.uniform(UpperCAmelCase_ , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def A_ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
SCREAMING_SNAKE_CASE__ = self.dummy_input
return init_dict, inputs_dict
| 169 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowerCAmelCase ( lowercase ,unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : int = KandinskyVaaImgaImgPipeline
__UpperCAmelCase : Optional[Any] = ["image_embeds", "negative_image_embeds", "image"]
__UpperCAmelCase : int = [
"image_embeds",
"negative_image_embeds",
"image",
]
__UpperCAmelCase : List[str] = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
__UpperCAmelCase : List[Any] = False
@property
def _lowercase ( self : Optional[int] ):
return 3_2
@property
def _lowercase ( self : Union[str, Any] ):
return 3_2
@property
def _lowercase ( self : Optional[int] ):
return self.time_input_dim
@property
def _lowercase ( self : Any ):
return self.time_input_dim * 4
@property
def _lowercase ( self : Any ):
return 1_0_0
@property
def _lowercase ( self : Any ):
torch.manual_seed(0 )
__lowercase = {
"in_channels": 4,
# Out channels is double the in channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
__lowercase = UNetaDConditionModel(**UpperCAmelCase__ )
return model
@property
def _lowercase ( self : Optional[Any] ):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _lowercase ( self : Optional[Any] ):
torch.manual_seed(0 )
__lowercase = VQModel(**self.dummy_movq_kwargs )
return model
def _lowercase ( self : Union[str, Any] ):
__lowercase = self.dummy_unet
__lowercase = self.dummy_movq
__lowercase = {
"num_train_timesteps": 1_0_0_0,
"beta_schedule": "linear",
"beta_start": 0.00_085,
"beta_end": 0.012,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
__lowercase = DDIMScheduler(**UpperCAmelCase__ )
__lowercase = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : List[Any], UpperCAmelCase__ : List[Any]=0 ):
__lowercase = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
__lowercase = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
UpperCAmelCase__ )
# create init_image
__lowercase = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
__lowercase = image.cpu().permute(0, 2, 3, 1 )[0]
__lowercase = Image.fromarray(np.uinta(UpperCAmelCase__ ) ).convert("RGB" ).resize((2_5_6, 2_5_6) )
if str(UpperCAmelCase__ ).startswith("mps" ):
__lowercase = torch.manual_seed(UpperCAmelCase__ )
else:
__lowercase = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
__lowercase = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 6_4,
"width": 6_4,
"num_inference_steps": 1_0,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def _lowercase ( self : Optional[Any] ):
__lowercase = "cpu"
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**UpperCAmelCase__ )
__lowercase = pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = pipe(**self.get_dummy_inputs(UpperCAmelCase__ ) )
__lowercase = output.images
__lowercase = pipe(
**self.get_dummy_inputs(UpperCAmelCase__ ), return_dict=UpperCAmelCase__, )[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__lowercase = np.array(
[0.6_199_778, 0.63_984_406, 0.46_145_785, 0.62_944_984, 0.5_622_215, 0.47_306_132, 0.47_441_456, 0.4_607_606, 0.48_719_263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self : Dict ):
__lowercase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_img2img_frog.npy" )
__lowercase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
__lowercase = "A red cartoon frog, 4k"
__lowercase = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.floataa )
pipe_prior.to(UpperCAmelCase__ )
__lowercase = KandinskyVaaImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.floataa )
__lowercase = pipeline.to(UpperCAmelCase__ )
pipeline.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = torch.Generator(device="cpu" ).manual_seed(0 )
__lowercase ,__lowercase = pipe_prior(
UpperCAmelCase__, generator=UpperCAmelCase__, num_inference_steps=5, negative_prompt="", ).to_tuple()
__lowercase = pipeline(
image=UpperCAmelCase__, image_embeds=UpperCAmelCase__, negative_image_embeds=UpperCAmelCase__, generator=UpperCAmelCase__, num_inference_steps=1_0_0, height=7_6_8, width=7_6_8, strength=0.2, output_type="np", )
__lowercase = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(UpperCAmelCase__, UpperCAmelCase__ )
| 17 |
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
def __init__( self : Any , A : Optional[int]=None , A : Tuple=None , *A : Tuple , **A : List[str] ):
super().__init__(*A , **A )
if config is None:
assert isinstance(self.model , A ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F""" {self.model.__class__}"""
)
_UpperCAmelCase : str = self.model.config
else:
_UpperCAmelCase : List[str] = config
_UpperCAmelCase : List[Any] = data_args
_UpperCAmelCase : str = self.config.tgt_vocab_size if isinstance(self.config , A ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
" padding.." )
if self.args.label_smoothing == 0:
_UpperCAmelCase : Optional[Any] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_UpperCAmelCase : Dict = label_smoothed_nll_loss
def _A ( self : Tuple , A : int ):
if self.optimizer is None:
_UpperCAmelCase : Tuple = ["bias", "LayerNorm.weight"]
_UpperCAmelCase : str = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
_UpperCAmelCase : int = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_UpperCAmelCase : List[str] = Adafactor
_UpperCAmelCase : List[Any] = {"scale_parameter": False, "relative_step": False}
else:
_UpperCAmelCase : List[str] = AdamW
_UpperCAmelCase : List[str] = {
"betas": (self.args.adam_betaa, self.args.adam_betaa),
"eps": self.args.adam_epsilon,
}
_UpperCAmelCase : List[Any] = self.args.learning_rate
if self.sharded_ddp:
_UpperCAmelCase : List[Any] = OSS(
params=A , optim=A , **A , )
else:
_UpperCAmelCase : Union[str, Any] = optimizer_cls(A , **A )
if self.lr_scheduler is None:
_UpperCAmelCase : List[str] = self._get_lr_scheduler(A )
else: # ignoring --lr_scheduler
logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." )
def _A ( self : List[str] , A : Optional[int] ):
_UpperCAmelCase : List[str] = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_UpperCAmelCase : Optional[Any] = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_UpperCAmelCase : str = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
_UpperCAmelCase : str = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=A )
return scheduler
def _A ( self : Tuple ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def _A ( self : Any , A : Union[str, Any] , A : Union[str, Any] , A : List[Any] ):
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_UpperCAmelCase : List[str] = model(**A , use_cache=A )[0]
_UpperCAmelCase : int = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
_UpperCAmelCase , _UpperCAmelCase : Any = model(**A , labels=A , use_cache=A )[:2]
else:
# compute label smoothed loss
_UpperCAmelCase : Optional[int] = model(**A , use_cache=A )[0]
_UpperCAmelCase : List[str] = torch.nn.functional.log_softmax(A , dim=-1 )
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.loss_fn(A , A , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def _A ( self : List[str] , A : Optional[int] , A : Optional[int] ):
_UpperCAmelCase : Union[str, Any] = inputs.pop("labels" )
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self._compute_loss(A , A , A )
return loss
def _A ( self : List[str] , A : nn.Module , A : Dict[str, Union[torch.Tensor, Any]] , A : bool , A : Optional[List[str]] = None , ):
_UpperCAmelCase : List[str] = self._prepare_inputs(A )
_UpperCAmelCase : Dict = {
"max_length": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_UpperCAmelCase : Dict = self.model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , **A , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase : int = self._pad_tensors_to_max_len(A , gen_kwargs["max_length"] )
_UpperCAmelCase : Any = inputs.pop("labels" )
with torch.no_grad():
# compute loss on predict data
_UpperCAmelCase , _UpperCAmelCase : str = self._compute_loss(A , A , A )
_UpperCAmelCase : List[str] = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_UpperCAmelCase : str = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase : Optional[Any] = self._pad_tensors_to_max_len(A , gen_kwargs["max_length"] )
return (loss, logits, labels)
def _A ( self : Dict , A : int , A : List[str] ):
# If PAD token is not defined at least EOS token has to be defined
_UpperCAmelCase : Union[str, Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
F""" padded to `max_length`={max_length}""" )
_UpperCAmelCase : Tuple = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_UpperCAmelCase : Tuple = tensor
return padded_tensor
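# --- Hedged sketch of the `arg_to_scheduler` lookup above: passing
# `--lr_scheduler cosine` selects get_cosine_schedule_with_warmup and wires it to
# the optimizer like this (the tiny model and step counts are demo assumptions).
import torch
from transformers.optimization import get_cosine_schedule_with_warmup
model = torch.nn.Linear(8, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=3e-5)
scheduler = get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=100, num_training_steps=1000)
for _ in range(5):
    optimizer.step()   # linear warmup for the first 100 steps, cosine decay afterwards
    scheduler.step()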
| 31 | 0 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
_UpperCamelCase = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """Whether to use SortishSampler or not."""} )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `max_length` value of the model configuration."""
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `num_beams` value of the model configuration."""
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={
"""help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."""
} , )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
UpperCAmelCase__ = super().to_dict()
for k, v in d.items():
if isinstance(__a , __a ):
UpperCAmelCase__ = v.to_dict()
return d
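# --- Hedged usage sketch for the dataclass above (its public name is
# Seq2SeqTrainingArguments; the output_dir value is a demo assumption). to_dict()
# flattens any nested GenerationConfig so the arguments serialize cleanly.
from transformers import Seq2SeqTrainingArguments
args = Seq2SeqTrainingArguments(output_dir='demo-out', predict_with_generate=True)
print(args.to_dict()['predict_with_generate'])  # True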
| 335 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """Projects CLIP embeddings into extra context tokens and an additive time embedding for the unCLIP decoder."""

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
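if __name__ == "__main__":
    # Minimal sketch of the classifier-free-guidance batching performed in
    # `forward` above: a "null" embedding is stacked in front of the real image
    # embeddings so conditional and unconditional branches share one forward
    # pass. Shapes are illustrative.
    _image_embeddings = torch.randn(2, 768)              # (batch, clip_embeddings_dim)
    _null = torch.zeros(768).unsqueeze(0).expand(2, -1)  # stands in for the learned parameter
    _batched = torch.cat([_null, _image_embeddings], dim=0)
    assert _batched.shape == (4, 768)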
| 335 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
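# Illustrative reduction of the lazy-import pattern above (hypothetical class,
# not the Hugging Face `_LazyModule`): attribute access triggers the real
# import, so `import package` itself stays cheap.
import importlib


class _TinyLazyModule:
    def __init__(self, name_to_module):
        # e.g. {"GPTNeoModel": "transformers.models.gpt_neo.modeling_gpt_neo"}
        self._name_to_module = name_to_module

    def __getattr__(self, name):
        module = importlib.import_module(self._name_to_module[name])
        return getattr(module, name)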
| 73 |
import d4rl  # noqa
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline


config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}


if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
| 103 | 0 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
    """FiLM-conditioned T5-style decoder used for spectrogram diffusion."""

    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = TaLayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)
        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask)

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )
            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre-self-attention layer norm, optionally FiLM-modulated
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states
class TaLayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class TaLayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense(nn.Module):
    """T5 v1.1-style gated feed-forward: GELU(x W_0) * (x W_1), then W_o."""

    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states


class TaLayerNorm(nn.Module):
    """T5-style RMS norm: no mean subtraction, no bias."""

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Always compute the variance in float32 for numerical stability.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states


class NewGELUActivation(nn.Module):
    """GELU with the tanh approximation (matches Google BERT / GPT-2)."""

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))


class TaFiLMLayer(nn.Module):
    """FiLM conditioning: predict per-channel (scale, shift) from a conditioning vector."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
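if __name__ == "__main__":
    # Tiny demo of the FiLM layer above (illustrative shapes): the conditioning
    # vector broadcasts over the sequence dimension.
    film = TaFiLMLayer(in_features=8, out_features=4)
    x = torch.randn(2, 10, 4)
    cond = torch.randn(2, 1, 8)
    print(film(x, cond).shape)  # torch.Size([2, 10, 4])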
| 139 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
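# Illustrative generalisation of the gating above: the same check works for any
# dependency (hypothetical helper, not part of the `datasets` API).
def _require_at_least(installed: str, minimum: str, name: str) -> None:
    from packaging import version as _version

    if _version.parse(installed) < _version.parse(minimum):
        raise ImportWarning(f"{name}>={minimum} is required, found {installed}.")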
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 139 | 1 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedy change-making: take the largest denominations first."""
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Find denominations
        while int(denomination) <= total_value:
            total_value -= int(denomination)
            answer.append(denomination)  # Append to the "answer" array

    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)

        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
| 170 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})
    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout)] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
logger.info(F'''Training/evaluation parameters {training_args}''')
# Detecting last checkpoint.
a__ : Any = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
a__ : str = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""")
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""")
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
a__ : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir)
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
a__ : Tuple = {"""train""": data_args.train_file, """validation""": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
a__ : Tuple = data_args.train_file.split(""".""")[-1]
a__ : Any = data_args.test_file.split(""".""")[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
a__ : int = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""")
for key in data_files.keys():
logger.info(F'''load a local file for {key}: {data_files[key]}''')
if data_args.train_file.endswith(""".csv"""):
# Loading a dataset from local csv files
a__ : int = load_dataset("""csv""" , data_files=_lowercase , cache_dir=model_args.cache_dir)
else:
# Loading a dataset from local json files
a__ : Dict = load_dataset("""json""" , data_files=_lowercase , cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a__ : Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
a__ : List[Any] = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=_lowercase , )
a__ : Dict = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path) , config=_lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
a__ : Union[str, Any] = {"""Refused""": 0, """Entailed""": 1}
a__ : Dict = {0: """Refused""", 1: """Entailed"""}
    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            """Parse '#'-separated rows (first row = header) into a DataFrame."""
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)

        result["label"] = examples["label"]
        return result
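    # Illustrative `table_text` format assumed by the helper above:
    # '#'-separated cells, first row is the header. For example
    #   "col1#col2\na#b\nc#d"
    # becomes a DataFrame with columns ["col1", "col2"] and rows ["a", "b"], ["c", "d"].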
    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""")
a__ : Optional[Any] = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
a__ : str = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""")
a__ : List[str] = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
a__ : List[str] = eval_dataset.select(range(data_args.max_eval_samples))
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("""--do_predict requires a test dataset""")
a__ : Any = raw_datasets["""test"""]
if data_args.max_predict_samples is not None:
a__ : Dict = predict_dataset.select(range(data_args.max_predict_samples))
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(_lowercase)) , 3):
logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''')
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
# Initialize our Trainer
a__ : List[str] = Trainer(
model=_lowercase , args=_lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_lowercase , tokenizer=_lowercase , data_collator=_lowercase , )
# Training
if training_args.do_train:
a__ : List[Any] = None
if training_args.resume_from_checkpoint is not None:
a__ : Optional[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
a__ : Dict = last_checkpoint
a__ : Dict = trainer.train(resume_from_checkpoint=_lowercase)
a__ : int = train_result.metrics
a__ : Any = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowercase)
)
a__ : int = min(_lowercase , len(_lowercase))
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" , _lowercase)
trainer.save_metrics("""train""" , _lowercase)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""")
a__ : List[str] = trainer.evaluate(eval_dataset=_lowercase)
a__ : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowercase)
a__ : Dict = min(_lowercase , len(_lowercase))
trainer.log_metrics("""eval""" , _lowercase)
trainer.save_metrics("""eval""" , _lowercase)
if training_args.do_predict:
logger.info("""*** Predict ***""")
# Removing the `label` columns because it contains -1 and Trainer won't like that.
a__ : Any = predict_dataset.remove_columns("""label""")
a__ : Optional[Any] = trainer.predict(_lowercase , metric_key_prefix="""predict""").predictions
a__ : Any = np.argmax(_lowercase , axis=1)
a__ : List[str] = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""")
if trainer.is_world_process_zero():
with open(_lowercase , """w""") as writer:
logger.info("""***** Predict Results *****""")
writer.write("""index\tprediction\n""")
for index, item in enumerate(_lowercase):
a__ : int = label_list[item]
writer.write(F'''{index}\t{item}\n''')
a__ : Tuple = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
if training_args.push_to_hub:
trainer.push_to_hub(**_lowercase)
else:
trainer.create_model_card(**_lowercase)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 170 | 1 |
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Miller-Rabin primality test with 5 random bases."""
    s = num - 1
    t = 0

    while s % 2 == 0:
        s = s // 2
        t += 1

    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num(num: int) -> bool:
    """Quick screen against small primes, then fall back to Miller-Rabin."""
    if num < 2:
        return False

    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
_lowerCamelCase = generate_large_prime()
print(('Prime number:', num))
print(('is_prime_low_num:', is_prime_low_num(num)))
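# Additional sanity check (probabilistic, so "almost surely"): 561 = 3 * 11 * 17
# is a Carmichael number that fools the plain Fermat test but not Miller-Rabin.
# rabin_miller(561)    -> almost surely False
# rabin_miller(10007)  -> True (10007 is prime)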
| 177 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowerCamelCase = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    r"""
    Constructs an image processor: shortest-edge resize, center crop, rescale
    and normalization with ImageNet default statistics. (Named here after the
    LeViT recipe its 256/224 shortest-edge trick corresponds to.)
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
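if __name__ == "__main__":
    # Quick check of the shortest-edge arithmetic used in `resize` above:
    # the short side is scaled to (256 / 224) * crop before center-cropping.
    for _crop in (224, 192):
        print(_crop, "->", int((256 / 224) * _crop))  # 224 -> 256, 192 -> 219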
| 177 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 64 |
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    """Decorator that registers `func` as the handler for a single key."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys):
    """Decorator that registers `func` as the handler for several keys."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Read one key press and dispatch to the registered handler, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
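if __name__ == "__main__":
    # Usage sketch (hypothetical demo class): methods marked with a key are
    # collected into `key_handler` by the metaclass; real callers register
    # KEYMAP codes rather than raw characters.
    class _DemoMenu(metaclass=KeyHandler):
        @mark("a")
        def on_a(cls):
            return "pressed a"

    print(_DemoMenu.key_handler["a"](_DemoMenu))  # -> pressed a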
| 124 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create an imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator, device=self.device, dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample
        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            samples = self.numpy_to_pil(samples)
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=samples)
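if __name__ == "__main__":
    # Sketch of the guidance arithmetic above: the batch holds the conditional
    # half followed by the unconditional half, and the guided prediction is
    # uncond + scale * (cond - uncond). Tensors are illustrative.
    _eps = torch.randn(4, 3)
    _cond, _uncond = torch.split(_eps, 2, dim=0)
    _guided = _uncond + 4.0 * (_cond - _uncond)
    print(_guided.shape)  # torch.Size([2, 3])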
| 99 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
_lowerCamelCase : Dict = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """Answers open-ended questions about images (multi-label classification head)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Assumed to already be a dict (or list of dicts) of {"image": ..., "question": ...}
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 99 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None, hidden_act="gelu",
        initializer_range=0.02, layer_norm_eps=1e-12, drop_path_rate=0.0, image_size=224,
        out_features=None, out_indices=None, **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
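if __name__ == "__main__":
    # Usage sketch: with no arguments the defaults fall back to the ConvNeXt-T layout.
    config = ConvNextV2Config()
    print(config.depths)       # [3, 3, 9, 3]
    print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']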
| 84 |
'''simple docstring'''
from math import isqrt
def is_prime(number: int) -> bool:
    """Check divisibility up to the integer square root."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below `max_prime` of the form (n + 1)**3 - n**3."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
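
# Worked check: the loop's candidates are exactly the differences of
# consecutive cubes, stepping by 6 * (n + 1) between them.
if __name__ == "__main__":
    candidate, index = 7, 1
    for n in range(1, 5):
        assert (n + 1) ** 3 - n**3 == candidate
        index += 1
        candidate += 6 * index
    # Candidates visited: 7, 19, 37, 61 -- matching (n+1)^3 - n^3 for n = 1..4.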
| 185 | 0 |
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
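
# For illustration only: a simplified re-implementation of the backend lookup the
# tests above exercise. The real logic lives in utils/check_dummies.py; the regex
# here is an assumption that just captures the idea.
if __name__ == "__main__":
    import re

    _re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")

    def find_backend_sketch(line):
        # Join every `is_xxx_available()` hit on the line with "_and_".
        backends = _re_backend.findall(line)
        return "_and_".join(backends) or None

    assert find_backend_sketch("    if not is_torch_available():") == "torch"
    assert (
        find_backend_sketch("    if not (is_torch_available() and is_transformers_available()):")
        == "torch_and_transformers"
    )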
| 281 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)
    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass


@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
        # fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]], dtype=tf.int32)  # noqa: E231
        # fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
        # fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0]  # noqa: E231
        # fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
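
# Standalone sketch (not part of the tests, requires TensorFlow): how
# Transformer-XL's `mems` act as a recurrence cache across segments. The
# config values mirror the model tester above.
if __name__ == "__main__":
    demo_config = TransfoXLConfig(
        vocab_size=99, d_model=32, d_embed=32, n_head=4, d_head=8, d_inner=128,
        div_val=2, n_layer=2, mem_len=30, cutoffs=[10, 50, 80],
    )
    demo_model = TFTransfoXLModel(demo_config)

    segment_1 = tf.random.uniform((1, 7), maxval=99, dtype=tf.int32)
    segment_2 = tf.random.uniform((1, 7), maxval=99, dtype=tf.int32)

    # First segment: no memory yet; the model returns hidden states plus fresh `mems`.
    _, mems = demo_model(segment_1).to_tuple()
    # Second segment: feeding `mems` back lets attention look across the segment
    # boundary without recomputing segment_1.
    hidden, mems = demo_model({"input_ids": segment_2, "mems": mems}).to_tuple()
    print(hidden.shape)  # (1, 7, 32)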
| 281 | 1 |
"""Safety checker that screens generated images for NSFW and watermarked content."""

import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)

        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
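
# A minimal shape-level sketch with untrained, randomly initialised heads -- for
# illustration only; real use loads the checker from a pretrained checkpoint.
if __name__ == "__main__":
    checker = IFSafetyChecker(CLIPConfig())
    demo_images = [np.zeros((64, 64, 3), dtype=np.float32)]
    clip_input = torch.randn(1, 3, 224, 224)  # default CLIP vision input size
    demo_images, nsfw, watermark = checker(clip_input, demo_images)
    print(nsfw, watermark)  # per-image boolean flags, e.g. [False] [False]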
| 254 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_image_size,
    is_torch_available,
    is_torch_tensor,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_torch_available():
    import torch

if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
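
# Worked example: a 480x640 input targeted at 384x384 with keep_aspect_ratio and
# snapping to multiples of 32. The height scale (0.8) deviates less from 1.0 than
# the width scale (0.6), so both sides scale by 0.8, giving (384, 512).
if __name__ == "__main__":
    demo_image = np.zeros((3, 480, 640))  # channels-first (C, H, W)
    print(get_resize_output_image_size(demo_image, output_size=384, keep_aspect_ratio=True, multiple=32))
    # -> (384, 512)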
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [
                self.resize(
                    image=image,
                    size=size,
                    keep_aspect_ratio=keep_aspect_ratio,
                    ensure_multiple_of=ensure_multiple_of,
                    resample=resample,
                )
                for image in images
            ]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
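
# A minimal preprocessing sketch (illustrative values; a DPT checkpoint's saved
# image-processor config would be loaded with `from_pretrained` in real use):
if __name__ == "__main__":
    from PIL import Image

    processor = DPTImageProcessor(size={"height": 384, "width": 384}, keep_aspect_ratio=True, ensure_multiple_of=32)
    demo = Image.new("RGB", (640, 480))  # PIL size is (width, height)
    batch = processor(images=demo, return_tensors="pt")
    print(batch["pixel_values"].shape)  # torch.Size([1, 3, 384, 512])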
| 257 | 0 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self ) -> List[Any]:
A = psutil.Process()
A = False
def UpperCamelCase__ ( self ) -> Optional[int]:
A = -1
while True:
A = max(self.process.memory_info().rss ,self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def UpperCamelCase__ ( self ) -> Any:
A = True
A = threading.Thread(target=self.peak_monitor )
A = True
self.thread.start()
def UpperCamelCase__ ( self ) -> List[str]:
A = False
self.thread.join()
return self.cpu_memory_peak
UpperCAmelCase =PeakCPUMemory()
def _A ( ):
"""simple docstring"""
A = {"""time""": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
A = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
A = torch.cuda.memory_allocated(_a )
torch.cuda.reset_peak_memory_stats()
return measures
def _A ( _a : int ):
"""simple docstring"""
A = {"""time""": time.time() - start_measures["""time"""]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
A = (psutil.Process().memory_info().rss - start_measures["""cpu"""]) / 2**2_0
A = (cpu_peak_tracker.stop() - start_measures["""cpu"""]) / 2**2_0
# GPU mem
for i in range(torch.cuda.device_count() ):
A = (torch.cuda.memory_allocated(_a ) - start_measures[str(_a )]) / 2**2_0
A = (torch.cuda.max_memory_allocated(_a ) - start_measures[str(_a )]) / 2**2_0
return measures
def _A ( _a : int , _a : Dict ):
"""simple docstring"""
print(f'{description}:' )
print(f'- Time: {measures["time"]:.2f}s' )
for i in range(torch.cuda.device_count() ):
print(f'- GPU {i} allocated: {measures[str(_a )]:.2f}MiB' )
A = measures[f'{i}-peak']
print(f'- GPU {i} peak: {peak:.2f}MiB' )
print(f'- CPU RAM allocated: {measures["cpu"]:.2f}MiB' )
print(f'- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB' )
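
# Typical usage of the helpers above. The GPU numbers require a CUDA-enabled
# machine; the workload here is just an illustrative matmul.
if __name__ == "__main__":
    start = start_measure()
    x = torch.randn(2048, 2048)
    y = x @ x  # the block of code being profiled
    end = end_measure(start)
    log_measures(end, "2048x2048 matmul")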
| 77 |
"""simple docstring"""
def _A ( _a : Optional[int] ):
"""simple docstring"""
A = []
A = set({"""(""", """[""", """{"""} )
A = set({""")""", """]""", """}"""} )
A = {"""{""": """}""", """[""": """]""", """(""": """)"""}
for i in range(len(_a ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(_a ) == 0 or (len(_a ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(_a ) == 0
def _A ( ):
"""simple docstring"""
A = input("""Enter sequence of brackets: """ )
if is_balanced(_a ):
print(_a , """is balanced""" )
else:
print(_a , """is not balanced""" )
if __name__ == "__main__":
main()
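
# Quick self-checks for is_balanced:
if __name__ == "__main__":
    assert is_balanced("([]{})")
    assert not is_balanced("([)]")  # crossed pair
    assert not is_balanced("(")  # unclosed bracket stays on the stack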
| 77 | 1 |