code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester :
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ) -> Optional[int]:
"""simple docstring"""
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.relative_attention = relative_attention
self.position_biased_input = position_biased_input
self.pos_att_type = pos_att_type
self.scope = scope
def prepare_config_and_inputs( self ) -> Optional[Any]:
"""simple docstring"""
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
config = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=True , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> int:
"""simple docstring"""
model = TFDebertaVaModel(config=config )
inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
inputs = [input_ids, input_mask]
result = model(inputs )
result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[str]:
"""simple docstring"""
model = TFDebertaVaForMaskedLM(config=config )
inputs = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[int]:
"""simple docstring"""
config.num_labels = self.num_labels
model = TFDebertaVaForSequenceClassification(config=config )
inputs = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Union[str, Any]:
"""simple docstring"""
config.num_labels = self.num_labels
model = TFDebertaVaForTokenClassification(config=config )
inputs = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[Any]:
"""simple docstring"""
model = TFDebertaVaForQuestionAnswering(config=config )
inputs = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
result = model(inputs )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def prepare_config_and_inputs_for_common( self ) -> Optional[Any]:
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class TFDebertaVaModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
pipeline_model_mapping = (
{
'''feature-extraction''': TFDebertaVaModel,
'''fill-mask''': TFDebertaVaForMaskedLM,
'''question-answering''': TFDebertaVaForQuestionAnswering,
'''text-classification''': TFDebertaVaForSequenceClassification,
'''token-classification''': TFDebertaVaForTokenClassification,
'''zero-shot''': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
test_head_masking = False
test_onnx = False
def setUp( self ) -> str:
"""simple docstring"""
self.model_tester = TFDebertaVaModelTester(self )
self.config_tester = ConfigTester(self , config_class=DebertaVaConfig , hidden_size=37 )
def test_config( self ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def test_model( self ) -> Optional[int]:
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_for_masked_lm( self ) -> Union[str, Any]:
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
def test_for_question_answering( self ) -> Dict:
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
def test_for_sequence_classification( self ) -> List[Any]:
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
def test_for_token_classification( self ) -> int:
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
def test_model_from_pretrained( self ) -> List[Any]:
"""simple docstring"""
model = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
self.assertIsNotNone(model )
@require_tf
class TFDebertaVaModelIntegrationTest ( unittest.TestCase ):
@unittest.skip(reason='''Model not available yet''' )
def test_inference_masked_lm( self ) -> Any:
"""simple docstring"""
pass
@slow
def test_inference_no_head( self ) -> Union[str, Any]:
"""simple docstring"""
model = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
input_ids = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
output = model(input_ids , attention_mask=attention_mask )[0]
expected_slice = tf.constant(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 )
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester :
'''simple docstring'''
def __init__(self , parent , batch_size=2 , is_training=True , use_auxiliary_loss=False , num_queries=10 , num_channels=3 , min_size=32 * 4 , max_size=32 * 6 , num_labels=4 , mask_feature_size=32 , ) -> Optional[Any]:
self.parent = parent
self.batch_size = batch_size
self.is_training = is_training
self.use_auxiliary_loss = use_auxiliary_loss
self.num_queries = num_queries
self.num_channels = num_channels
self.min_size = min_size
self.max_size = max_size
self.num_labels = num_labels
self.mask_feature_size = mask_feature_size
def prepare_config_and_inputs(self ) -> str:
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
torch_device )
pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size] , device=torch_device )
mask_labels = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=torch_device ) > 0.5
).float()
class_labels = (torch.rand((self.batch_size, self.num_labels) , device=torch_device ) > 0.5).long()
config = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def get_config(self ) -> Tuple:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def prepare_config_and_inputs_for_common(self ) -> Optional[Any]:
config , pixel_values , pixel_mask , mask_labels , class_labels = self.prepare_config_and_inputs()
inputs_dict = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def check_output_hidden_state(self , output , config ) -> int:
encoder_hidden_states = output.encoder_hidden_states
pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(encoder_hidden_states ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(pixel_decoder_hidden_states ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(transformer_decoder_hidden_states ) , config.decoder_config.decoder_layers )
def create_and_check_maskformer_model(self , config , pixel_values , pixel_mask , output_hidden_states=False ) -> Union[str, Any]:
with torch.no_grad():
model = MaskFormerModel(config=config )
model.to(torch_device )
model.eval()
output = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
output = model(pixel_values , output_hidden_states=True )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(output , config )
def create_and_check_maskformer_instance_segmentation_head_model(self , config , pixel_values , pixel_mask , mask_labels , class_labels ) -> Union[str, Any]:
model = MaskFormerForInstanceSegmentation(config=config )
model.to(torch_device )
model.eval()
def comm_check_on_output(result ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
result = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
result = model(pixel_values )
comm_check_on_output(result )
result = model(
pixel_values=pixel_values , pixel_mask=pixel_mask , mask_labels=mask_labels , class_labels=class_labels )
comm_check_on_output(result )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskFormerModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
pipeline_model_mapping = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
is_encoder_decoder = False
test_pruning = False
test_head_masking = False
test_missing_keys = False
def setUp(self ) -> int:
self.model_tester = MaskFormerModelTester(self )
self.config_tester = ConfigTester(self , config_class=MaskFormerConfig , has_text_modality=False )
def test_config(self ) -> int:
self.config_tester.run_common_tests()
def test_maskformer_model(self ) -> List[Any]:
config , inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(config , **inputs , output_hidden_states=False )
def test_maskformer_instance_segmentation_head_model(self ) -> Any:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs )
@unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
def lowercase (self ) -> Optional[Any]:
pass
@unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
def lowercase (self ) -> Optional[int]:
pass
@unittest.skip(reason="""MaskFormer is not a generative model""" )
def lowercase (self ) -> int:
pass
@unittest.skip(reason="""MaskFormer does not use token embeddings""" )
def lowercase (self ) -> Optional[int]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def lowercase (self ) -> Optional[Any]:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowercase (self ) -> Tuple:
pass
def test_forward_signature(self ) -> List[str]:
config , _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , expected_arg_names )
@slow
def test_model_from_pretrained(self ) -> int:
for model_name in ["facebook/maskformer-swin-small-coco"]:
model = MaskFormerModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def test_model_with_labels(self ) -> Tuple:
size = (self.model_tester.min_size,) * 2
inputs = {
"""pixel_values""": torch.randn((2, 3, *size) , device=torch_device ),
"""mask_labels""": torch.randn((2, 10, *size) , device=torch_device ),
"""class_labels""": torch.zeros(2 , 10 , device=torch_device ).long(),
}
model = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(torch_device )
outputs = model(**inputs )
self.assertTrue(outputs.loss is not None )
def test_output_hidden_state(self ) -> Dict:
config , inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(config , **inputs , output_hidden_states=True )
def test_attention_outputs(self ) -> List[str]:
config , inputs = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config ).to(torch_device )
outputs = model(**inputs , output_attentions=True )
self.assertTrue(outputs.attentions is not None )
def test_training(self ) -> Tuple:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
model_class = self.all_model_classes[1]
config , pixel_values , pixel_mask , mask_labels , class_labels = self.model_tester.prepare_config_and_inputs()
model = model_class(config )
model.to(torch_device )
model.train()
loss = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels ).loss
loss.backward()
def test_retain_grad_hidden_states_attentions(self ) -> List[str]:
# only MaskFormerForInstanceSegmentation has the loss
model_class = self.all_model_classes[1]
config , pixel_values , pixel_mask , mask_labels , class_labels = self.model_tester.prepare_config_and_inputs()
config.output_hidden_states = True
config.output_attentions = True
model = model_class(config )
model.to(torch_device )
model.train()
outputs = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels )
encoder_hidden_states = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
attentions = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=True )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1E-4
def prepare_img( ):
image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def default_image_processor(self ) -> Optional[int]:
return (
MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
if is_vision_available()
else None
)
def lowercase (self ) -> str:
_snake_case = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(UpperCAmelCase )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase )
_snake_case = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
_snake_case = model(**UpperCAmelCase )
_snake_case = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
_snake_case = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
_snake_case = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
def lowercase (self ) -> List[str]:
_snake_case = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(UpperCAmelCase )
.eval()
)
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase )
_snake_case = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
_snake_case = model(**UpperCAmelCase )
# masks_queries_logits
_snake_case = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_snake_case = [
[-1.373_7124, -1.772_4937, -1.936_4233],
[-1.597_7281, -1.986_7939, -2.152_3695],
[-1.579_5398, -1.926_9832, -2.09_3942],
]
_snake_case = torch.tensor(UpperCAmelCase ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
# class_queries_logits
_snake_case = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_snake_case = torch.tensor(
[
[1.6_5_1_2e0_0, -5.2_5_7_2e0_0, -3.3_5_1_9e0_0],
[3.6_1_6_9e-0_2, -5.9_0_2_5e0_0, -2.9_3_1_3e0_0],
[1.0_7_6_6e-0_4, -7.7_6_3_0e0_0, -5.1_2_6_3e0_0],
] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
def lowercase (self ) -> List[Any]:
_snake_case = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" )
.to(UpperCAmelCase )
.eval()
)
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase )
_snake_case = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
_snake_case = model(**UpperCAmelCase )
# masks_queries_logits
_snake_case = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_snake_case = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
_snake_case = torch.tensor(UpperCAmelCase ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
# class_queries_logits
_snake_case = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_snake_case = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
def lowercase (self ) -> Tuple:
_snake_case = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(UpperCAmelCase )
.eval()
)
_snake_case = self.default_image_processor
_snake_case = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] , return_tensors="""pt""" , )
_snake_case = inputs["""pixel_values"""].to(UpperCAmelCase )
_snake_case = [el.to(UpperCAmelCase ) for el in inputs["""mask_labels"""]]
_snake_case = [el.to(UpperCAmelCase ) for el in inputs["""class_labels"""]]
with torch.no_grad():
_snake_case = model(**UpperCAmelCase )
self.assertTrue(outputs.loss is not None ) | 341 | 0 |
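The two shape assertions in `comm_check_on_output` above encode a pair of MaskFormer conventions: mask logits are produced at a quarter of the input spatial resolution, and the class head reserves one extra logit for the null ("no object") class. A minimal sketch of those expected shapes, reusing the tester's default sizes (an illustration, not part of the test suite):
# Expected MaskFormer output shapes, derived from the assertions above.
batch_size, num_queries, num_labels = 2, 10, 4
min_size, max_size = 32 * 4, 32 * 6
# masks are predicted at 1/4 of the input spatial resolution
expected_mask_shape = (batch_size, num_queries, min_size // 4, max_size // 4)
# one extra class logit for the null ("no object") class
expected_class_shape = (batch_size, num_queries, num_labels + 1)
print(expected_mask_shape)   # (2, 10, 32, 48)
print(expected_class_shape)  # (2, 10, 5)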
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
)
parser.add_argument(
"""--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
)
parser.add_argument(
"""--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
)
parser.add_argument("""--vocab_size""", default=30_522, type=int)
args = parser.parse_args()
logger.info(F"""Loading data from {args.data_file}""")
with open(args.data_file, """rb""") as fp:
data = pickle.load(fp)
logger.info("""Counting occurrences for MLM.""")
counter = Counter()
for tk_ids in data:
counter.update(tk_ids)
counts = [0] * args.vocab_size
for k, v in counter.items():
counts[k] = v
logger.info(F"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, """wb""") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 370 |
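Downstream, per-token counts like these are typically inverted into sampling weights so that frequent tokens are masked less often. A hedged sketch of that smoothing step (the power-law exponent and variable names here are illustrative assumptions, not values taken from the script above):
import numpy as np
# Illustrative smoothing: power-law inversion of the counts, in the
# XLM/word2vec style the script's description refers to. The 0.7
# exponent is a placeholder assumption.
counts = [5, 0, 100, 1]                                  # per-token occurrence counts
freqs = np.maximum(np.array(counts, dtype=np.float64), 1)  # avoid division by zero
probs = freqs ** -0.7                                    # rarer tokens get larger weight
probs = probs / probs.sum()                              # normalize into a distribution
print(probs)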
"""simple docstring"""
import qiskit
def half_adder( bit0 : int , bit1 : int )-> qiskit.result.counts.Counts:
'''simple docstring'''
simulator = qiskit.Aer.get_backend("aer_simulator" )
qc_ha = qiskit.QuantumCircuit(4 , 2 )
# encode inputs in qubits 0 and 1
if bit0 == 1:
qc_ha.x(0 )
if bit1 == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
job = qiskit.execute(qc_ha , simulator , shots=1000 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(qc_ha )
if __name__ == "__main__":
counts = half_adder(1, 1)
print(F"""Half Adder Output Qubit Counts: {counts}""")
| 298 | 0 |
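For reference, the two measured bits should reproduce the classical half-adder truth table: the CNOTs write the XOR (sum) onto qubit 2 and the Toffoli writes the AND (carry) onto qubit 3. A quick classical cross-check, independent of qiskit:
# Classical cross-check of the half-adder logic implemented above.
for bit0 in (0, 1):
    for bit1 in (0, 1):
        total = bit0 ^ bit1   # sum bit, written by the two CNOTs
        carry = bit0 & bit1   # carry bit, written by the Toffoli gate
        print(f"{bit0} + {bit1} -> sum={total}, carry={carry}")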
"""simple docstring"""
def lowerCamelCase ( txt : str ) -> list[str]:
'''simple docstring'''
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(txt ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__('doctest').testmod()
| 115 |
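A short usage sketch of the helper above: each alphabetic position yields one variant with that character uppercased, and non-alphabetic positions are skipped. The function name `uppercase_variants` below is an illustrative stand-in for the obfuscated name in the sample:
def uppercase_variants(txt: str) -> list[str]:
    # same logic as the helper above
    return [txt[:a] + txt[a].upper() + txt[a + 1:] for a in range(len(txt)) if txt[a].isalpha()]

print(uppercase_variants("ab1c"))
# ['Ab1c', 'aB1c', 'ab1C'] -- the digit position produces no variant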
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass ( frequency , samplerate , q_factor = 1 / sqrt(2 ) ):
"""simple docstring"""
w0 = tau * frequency / samplerate
_sin = sin(w0 )
_cos = cos(w0 )
alpha = _sin / (2 * q_factor)
b0 = (1 - _cos) / 2
b1 = 1 - _cos
a0 = 1 + alpha
a1 = -2 * _cos
a2 = 1 - alpha
filt = IIRFilter(2 )
filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
return filt
def make_highpass ( frequency , samplerate , q_factor = 1 / sqrt(2 ) ):
"""simple docstring"""
w0 = tau * frequency / samplerate
_sin = sin(w0 )
_cos = cos(w0 )
alpha = _sin / (2 * q_factor)
b0 = (1 + _cos) / 2
b1 = -1 - _cos
a0 = 1 + alpha
a1 = -2 * _cos
a2 = 1 - alpha
filt = IIRFilter(2 )
filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
return filt
def make_bandpass ( frequency , samplerate , q_factor = 1 / sqrt(2 ) ):
"""simple docstring"""
w0 = tau * frequency / samplerate
_sin = sin(w0 )
_cos = cos(w0 )
alpha = _sin / (2 * q_factor)
b0 = _sin / 2
b1 = 0
b2 = -b0
a0 = 1 + alpha
a1 = -2 * _cos
a2 = 1 - alpha
filt = IIRFilter(2 )
filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
return filt
def make_allpass ( frequency , samplerate , q_factor = 1 / sqrt(2 ) ):
"""simple docstring"""
w0 = tau * frequency / samplerate
_sin = sin(w0 )
_cos = cos(w0 )
alpha = _sin / (2 * q_factor)
b0 = 1 - alpha
b1 = -2 * _cos
b2 = 1 + alpha
filt = IIRFilter(2 )
filt.set_coefficients([b2, b1, b0] , [b0, b1, b2] )
return filt
def make_peak ( frequency , samplerate , gain_db , q_factor = 1 / sqrt(2 ) , ):
"""simple docstring"""
w0 = tau * frequency / samplerate
_sin = sin(w0 )
_cos = cos(w0 )
alpha = _sin / (2 * q_factor)
big_a = 10 ** (gain_db / 40)
b0 = 1 + alpha * big_a
b1 = -2 * _cos
b2 = 1 - alpha * big_a
a0 = 1 + alpha / big_a
a1 = -2 * _cos
a2 = 1 - alpha / big_a
filt = IIRFilter(2 )
filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
return filt
def make_lowshelf ( frequency , samplerate , gain_db , q_factor = 1 / sqrt(2 ) , ):
"""simple docstring"""
w0 = tau * frequency / samplerate
_sin = sin(w0 )
_cos = cos(w0 )
alpha = _sin / (2 * q_factor)
big_a = 10 ** (gain_db / 40)
pmc = (big_a + 1) - (big_a - 1) * _cos
ppmc = (big_a + 1) + (big_a - 1) * _cos
mpc = (big_a - 1) - (big_a + 1) * _cos
pmpc = (big_a - 1) + (big_a + 1) * _cos
aa2 = 2 * sqrt(big_a ) * alpha
b0 = big_a * (pmc + aa2)
b1 = 2 * big_a * mpc
b2 = big_a * (pmc - aa2)
a0 = ppmc + aa2
a1 = -2 * pmpc
a2 = ppmc - aa2
filt = IIRFilter(2 )
filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
return filt
def make_highshelf ( frequency , samplerate , gain_db , q_factor = 1 / sqrt(2 ) , ):
"""simple docstring"""
w0 = tau * frequency / samplerate
_sin = sin(w0 )
_cos = cos(w0 )
alpha = _sin / (2 * q_factor)
big_a = 10 ** (gain_db / 40)
pmc = (big_a + 1) - (big_a - 1) * _cos
ppmc = (big_a + 1) + (big_a - 1) * _cos
mpc = (big_a - 1) - (big_a + 1) * _cos
pmpc = (big_a - 1) + (big_a + 1) * _cos
aa2 = 2 * sqrt(big_a ) * alpha
b0 = big_a * (ppmc + aa2)
b1 = -2 * big_a * pmpc
b2 = big_a * (ppmc - aa2)
a0 = pmc + aa2
a1 = 2 * mpc
a2 = pmc - aa2
filt = IIRFilter(2 )
filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
return filt
| 253 | 0 |
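Each helper above fills in one biquad recipe in the style of the Audio EQ Cookbook (w0 = tau * frequency / samplerate, alpha = sin(w0) / (2 * q_factor)) and hands the coefficients to a second-order IIRFilter. As a self-contained illustration of what those coefficients do per sample, here is a generic direct-form biquad difference equation (a sketch, not the repository's IIRFilter implementation):
def biquad(samples, b, a):
    # Direct-form I biquad:
    # y[n] = (b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]) / a0
    b0, b1, b2 = b
    a0, a1, a2 = a
    x1 = x2 = y1 = y2 = 0.0
    out = []
    for x in samples:
        y = (b0 * x + b1 * x1 + b2 * x2 - a1 * y1 - a2 * y2) / a0
        x2, x1 = x1, x
        y2, y1 = y1, y
        out.append(y)
    return out

# impulse response of an arbitrary biquad, for illustration only
print(biquad([1.0, 0.0, 0.0, 0.0], b=(0.2, 0.4, 0.2), a=(1.0, -0.3, 0.1)))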
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys( config, base_model=False ) ->Optional[Any]:
"""simple docstring"""
rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''deit.embeddings.cls_token'''),
('''dist_token''', '''deit.embeddings.distillation_token'''),
('''patch_embed.proj.weight''', '''deit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''deit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''deit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('''norm.weight''', '''deit.layernorm.weight'''),
('''norm.bias''', '''deit.layernorm.bias'''),
('''head.weight''', '''cls_classifier.weight'''),
('''head.bias''', '''cls_classifier.bias'''),
('''head_dist.weight''', '''distillation_classifier.weight'''),
('''head_dist.bias''', '''distillation_classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v( state_dict, config, base_model=False ) ->Dict:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
prefix = ''''''
else:
prefix = '''deit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
in_proj_weight = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
in_proj_bias = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
: config.hidden_size, :
]
state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
-config.hidden_size :, :
]
state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def rename_key( dct, old, new ) ->List[str]:
"""simple docstring"""
val = dct.pop(old )
dct[new] = val
def prepare_img( ) ->Any:
"""simple docstring"""
url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
im = Image.open(requests.get(url, stream=True ).raw )
return im
@torch.no_grad()
def convert_deit_checkpoint( deit_name, pytorch_dump_folder_path ) ->Optional[Any]:
"""simple docstring"""
config = DeiTConfig()
# all deit models have fine-tuned heads
base_model = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
config.num_labels = 1000
repo_id = '''huggingface/label-files'''
filename = '''imagenet-1k-id2label.json'''
id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset''' ), '''r''' ) )
id2label = {int(k ): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
config.patch_size = int(deit_name[-6:-4] )
config.image_size = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith('''tiny''' ):
config.hidden_size = 192
config.intermediate_size = 768
config.num_hidden_layers = 12
config.num_attention_heads = 3
elif deit_name[9:].startswith('''small''' ):
config.hidden_size = 384
config.intermediate_size = 1536
config.num_hidden_layers = 12
config.num_attention_heads = 6
if deit_name[9:].startswith('''base''' ):
pass
elif deit_name[4:].startswith('''large''' ):
config.hidden_size = 1024
config.intermediate_size = 4096
config.num_hidden_layers = 24
config.num_attention_heads = 16
# load original model from timm
timm_model = timm.create_model(deit_name, pretrained=True )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
state_dict = timm_model.state_dict()
rename_keys = create_rename_keys(config, base_model )
for src, dest in rename_keys:
rename_key(state_dict, src, dest )
read_in_q_k_v(state_dict, config, base_model )
# load HuggingFace model
model = DeiTForImageClassificationWithTeacher(config ).eval()
model.load_state_dict(state_dict )
# Check outputs on an image, prepared by DeiTImageProcessor
size = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size )
encoding = image_processor(images=prepare_img(), return_tensors='''pt''' )
pixel_values = encoding['''pixel_values''']
outputs = model(pixel_values )
timm_logits = timm_model(pixel_values )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(timm_logits, outputs.logits, atol=1e-3 )
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(pytorch_dump_folder_path )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 173 |
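The `read_in_q_k_v` helper above relies on timm storing query, key and value as one fused `qkv` matrix of shape `(3 * hidden_size, hidden_size)`, stacked in q/k/v order. A small sketch of that split with synthetic tensors (shapes only, no real checkpoint involved):
import torch

hidden_size = 8
# synthetic stand-ins for timm's fused blocks.{i}.attn.qkv parameters
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
in_proj_bias = torch.randn(3 * hidden_size)

# slice in q/k/v order, exactly as the conversion loop above does
q_w = in_proj_weight[:hidden_size, :]
k_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
v_w = in_proj_weight[-hidden_size:, :]
q_b = in_proj_bias[:hidden_size]
k_b = in_proj_bias[hidden_size : 2 * hidden_size]
v_b = in_proj_bias[-hidden_size:]

assert q_w.shape == k_w.shape == v_w.shape == (hidden_size, hidden_size)
assert q_b.shape == k_b.shape == v_b.shape == (hidden_size,)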
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator, batch_size = 16 ) ->List[Any]:
"""simple docstring"""
tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
datasets = load_dataset('''glue''', '''mrpc''' )
def tokenize_function(examples ):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
tokenized_datasets = datasets.map(
tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''' )
def collate_fn(examples ):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
elif accelerator.mixed_precision != "no":
pad_to_multiple_of = 8
else:
pad_to_multiple_of = None
return tokenizer.pad(
examples, padding='''longest''', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='''pt''', )
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
eval_dataloader = DataLoader(
tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=batch_size )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
get_dataloaders = mocked_dataloaders # noqa: F811
def training_function( config, args ) ->str:
"""simple docstring"""
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None ) == "1":
config['''num_epochs'''] = 2
# New Code #
gradient_accumulation_steps = int(args.gradient_accumulation_steps )
local_sgd_steps = int(args.local_sgd_steps )
# Initialize accelerator
accelerator = Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError('''LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)''' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config['''lr''']
num_epochs = int(config['''num_epochs'''] )
seed = int(config['''seed'''] )
batch_size = int(config['''batch_size'''] )
metric = evaluate.load('''glue''', '''mrpc''' )
set_seed(seed )
train_dataloader , eval_dataloader = get_dataloaders(accelerator, batch_size )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=True )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
model = model.to(accelerator.device )
# Instantiate optimizer
optimizer = AdamW(params=model.parameters(), lr=lr )
# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader ) * num_epochs), )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
# Now we train the model
for epoch in range(num_epochs ):
model.train()
with LocalSGD(
accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(train_dataloader ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(model ):
output = model(**batch )
loss = output.loss
accelerator.backward(loss )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(eval_dataloader ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
outputs = model(**batch )
predictions = outputs.logits.argmax(dim=-1 )
predictions , references = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=predictions, references=references, )
eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""", eval_metric )
def main( ) ->int:
"""simple docstring"""
parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''', type=str, default=None, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''', )
# New Code #
parser.add_argument(
'''--gradient_accumulation_steps''', type=int, default=1, help='''The number of minibatches to be ran before gradients are accumulated.''', )
parser.add_argument(
'''--local_sgd_steps''', type=int, default=8, help='''Number of local SGD steps or None to disable local SGD''' )
parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''' )
args = parser.parse_args()
config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(config, args )
if __name__ == "__main__":
main()
| 173 | 1 |
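Conceptually, local SGD lets every worker take `local_sgd_steps` independent optimizer steps and then synchronizes by averaging parameters across workers, which is different from gradient accumulation (that delays the optimizer step itself). A toy, single-process illustration of the averaging step (real implementations use a collective such as `all_reduce`; this just averages two simulated replicas):
import torch

# two simulated worker replicas of the same parameter tensor
worker_params = [torch.tensor([1.0, 2.0]), torch.tensor([3.0, 6.0])]

# after `local_sgd_steps` independent updates, average the replicas
avg = torch.stack(worker_params).mean(dim=0)
for p in worker_params:
    p.copy_(avg)

print(worker_params[0])  # tensor([2., 4.]) -- both replicas now agree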
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 182 |
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_safs = importlib.util.find_spec('s3fs') is not None
if _has_safs:
    from .safilesystem import SaFileSystem # noqa: F401
COMPRESSION_FILESYSTEMS = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri( dataset_path )-> str:
if "://" in dataset_path:
dataset_path = dataset_path.split("""://""" )[1]
return dataset_path
def is_remote_filesystem( fs )-> bool:
if fs is not None and fs.protocol != "file":
return True
else:
return False
def rename( fs , src , dst )-> None:
is_local = not is_remote_filesystem(fs )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(src ) , fs._strip_protocol(dst ) )
else:
fs.mv(src , dst , recursive=True )
def _reset_fsspec_lock()-> None:
if hasattr(fsspec.asyn , """reset_lock""" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
fsspec.asyn.iothread[0] = None
fsspec.asyn.loop[0] = None
fsspec.asyn.lock = threading.Lock()
| 321 | 0 |
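A short usage sketch for the two helpers above: URIs with a protocol prefix are stripped down to their path, and any filesystem whose protocol is not `file` counts as remote. The helper is restated locally so the snippet is self-contained, and the `fsspec.filesystem("memory")` call is an assumption that fsspec's built-in in-memory backend is available:
import fsspec

def extract_path_from_uri(dataset_path: str) -> str:
    # same logic as above: drop the protocol prefix if present
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path

print(extract_path_from_uri("s3://my-bucket/datasets/train"))  # my-bucket/datasets/train
print(extract_path_from_uri("/local/path"))                    # /local/path

fs = fsspec.filesystem("memory")
print(fs.protocol != "file")  # True -> would be treated as a remote filesystem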
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key( key ) -> Optional[Any]:
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 1_0:
key = key.replace(".model.1.bias" , ".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 1_0:
key = key.replace(".model.1.weight" , ".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 1_0:
key = key.replace(".model.3.bias" , ".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 1_0:
key = key.replace(".model.3.weight" , ".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
key = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
if "prime_prior" in key:
key = key.replace("prime_prior" , "encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
key = key.replace(".emb." , "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
key = key.replace("0.x_emb.emb" , "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
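

# Illustrative walk-through of one renaming rule with a made-up key: the encoder
# conv_in pattern maps the "model.<m>.<d>" pair onto a flat downsample_block index.
import re

_pattern = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
_m = _pattern.fullmatch("encoders.0.level_blocks.1.model.2.1.weight")
_groups = _m.groups()                                 # ('0', '1', '2', '1', 'weight')
_block_index = int(_groups[2]) * 2 + int(_groups[3])  # 2 * 2 + 1 = 5
assert _block_index == 5
# -> "encoders.0.level_blocks.1.downsample_block.5.weight"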
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """Download the original checkpoints, remap their keys, and save a transformers model."""
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="jukebox-5b-lyrics",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="jukebox-5b-lyrics-converted",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
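
# Example invocation of the converter above (the defaults already point at the
# 5b-lyrics checkpoint; the script file name is an assumption for illustration):
#
#   python convert_jukebox.py \
#       --model_name jukebox-5b-lyrics \
#       --pytorch_dump_folder_path jukebox-5b-lyrics-converted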
| 365 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
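

# Minimal usage sketch of the two classes above (the values are illustrative):
if __name__ == "__main__":
    config = Data2VecVisionConfig(hidden_size=768, num_hidden_layers=12)
    onnx_config = Data2VecVisionOnnxConfig(config)
    print(onnx_config.inputs)               # OrderedDict([('pixel_values', {0: 'batch', ...})])
    print(onnx_config.atol_for_validation)  # 1e-4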
| 108 | 0 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True,
        use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
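

# For the defaults above (image_size=30, patch_size=2) the sequence length used by
# the tester works out to 227: (30 // 2) ** 2 = 225 patches plus the [CLS] and
# distillation tokens. A quick arithmetic check:
assert (30 // 2) ** 2 + 2 == 227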
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = TFDeiTModelTester(self )
snake_case__ : List[str] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def __a ( self ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def __a ( self ) -> Any:
'''simple docstring'''
pass
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[str] = model_class(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
snake_case__ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ , tf.keras.layers.Dense ) )
def __a ( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE__ )
snake_case__ : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Any = [*signature.parameters.keys()]
snake_case__ : str = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def __a ( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE__ )
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ) -> List[str]:
'''simple docstring'''
snake_case__ : Tuple = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def __a ( self ) -> int:
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : int = TFDeiTModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
@cached_property
def __a ( self ) -> int:
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : Dict = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' )
snake_case__ : List[Any] = self.default_image_processor
snake_case__ : str = prepare_img()
snake_case__ : Tuple = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='tf' )
# forward pass
snake_case__ : List[Any] = model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
snake_case__ : Tuple = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
snake_case__ : Tuple = tf.constant([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
| 143 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states, shape=(batch, height * 2, width * 2, channels), method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
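

# Minimal usage sketch of FlaxUpsample2D (the shapes are illustrative): flax
# modules bind their parameters via init() and are invoked via apply().
if __name__ == "__main__":
    x = jnp.zeros((1, 16, 16, 8))  # NHWC layout, as expected by flax.linen.Conv
    upsample = FlaxUpsample2D(out_channels=8)
    params = upsample.init(jax.random.PRNGKey(0), x)
    y = upsample.apply(params, x)
    assert y.shape == (1, 32, 32, 8)  # spatial dims doubled, channels preserved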
| 32 | 0 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
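

# Minimal sketch of set_param on a standalone layer (the shapes are illustrative;
# kept as a comment so the conversion script has no import-time side effects):
#
#   layer = nn.Linear(4, 4)
#   set_param(layer, torch.zeros(4, 4), torch.zeros(4))
#   assert torch.equal(layer.weight, torch.zeros(4, 4))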
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
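
# Example invocation (the paths are placeholders and the script name is assumed):
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path ./reformer_weights.pkl \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin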
| 282 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
lowercase_ = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
lowercase_ = "zero2"
lowercase_ = "zero3"
lowercase_ = [ZEROa, ZEROa]
def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Dict:
'''simple docstring'''
A__ = parameterized.to_safe_name('_'.join(str(SCREAMING_SNAKE_CASE__ ) for x in param.args ) )
return f'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
lowercase_ = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results, so for now all
        # we can check is that the subprocess didn't fail
        pass

    def run_and_check(self, stage, model, eval_steps=10, distributed=True, fp16=True, quality_checks=True):
        model_name = models[model]

        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )

        self.do_checks(output_dir)

        return output_dir

    def run_trainer(self, stage, model_name, eval_steps=10, num_train_epochs=1, distributed=True, fp16=True):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
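
# Quick check of the launcher command built by get_launcher above (the GPU count
# of 2 is illustrative):
#
#   "deepspeed --num_nodes 1 --num_gpus 2".split()
#   -> ['deepspeed', '--num_nodes', '1', '--num_gpus', '2']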
| 282 | 1 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32

def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """
    Creates a set of `DataLoader`s for the glue dataset.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset('glue', 'mrpc' )
def tokenize_function(lowercase__ : Any ):
# max_length=None => use the model max length (it's actually the default)
__lowercase =tokenizer(examples['sentence1'], examples['sentence2'], truncation=lowercase__, max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__lowercase =datasets.map(
lowercase__, batched=lowercase__, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=lowercase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowercase =tokenized_datasets.rename_column('label', 'labels' )
def collate_fn(lowercase__ : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase__, padding='max_length', max_length=1_28, return_tensors='pt' )
return tokenizer.pad(lowercase__, padding='longest', return_tensors='pt' )
# Instantiate dataloaders.
__lowercase =DataLoader(
tokenized_datasets['train'], shuffle=lowercase__, collate_fn=lowercase__, batch_size=lowercase__ )
__lowercase =DataLoader(
tokenized_datasets['validation'], shuffle=lowercase__, collate_fn=lowercase__, batch_size=lowercase__ )
return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowercase =model(**lowercase__ )
__lowercase =outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__lowercase , __lowercase =accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowercase__ ) - 1:
__lowercase =predictions[: len(eval_dataloader.dataset ) - samples_seen]
__lowercase =references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowercase__, references=lowercase__, )
__lowercase =metric.compute()
return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowercase =config['lr']
__lowercase =int(config['num_epochs'] )
__lowercase =int(config['seed'] )
__lowercase =int(config['batch_size'] )
__lowercase =args.model_name_or_path
set_seed(lowercase__ )
__lowercase , __lowercase =get_dataloaders(lowercase__, lowercase__, lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowercase =AutoModelForSequenceClassification.from_pretrained(lowercase__, return_dict=lowercase__ )
# Instantiate optimizer
__lowercase =(
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__lowercase =optimizer_cls(params=model.parameters(), lr=lowercase__ )
if accelerator.state.deepspeed_plugin is not None:
__lowercase =accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
__lowercase =1
__lowercase =(len(lowercase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__lowercase =get_linear_schedule_with_warmup(
optimizer=lowercase__, num_warmup_steps=0, num_training_steps=lowercase__, )
else:
__lowercase =DummyScheduler(lowercase__, total_num_steps=lowercase__, warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase =accelerator.prepare(
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ )
# We need to keep track of how many total steps we have iterated over
__lowercase =0
# We also need to keep track of the stating epoch so files are named properly
__lowercase =0
__lowercase =evaluate.load('glue', 'mrpc' )
__lowercase =num_epochs
if args.partial_train_epoch is not None:
__lowercase =args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__lowercase =args.resume_from_checkpoint.split('epoch_' )[1]
__lowercase =''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__lowercase =int(lowercase__ ) + 1
__lowercase =evaluation_loop(lowercase__, lowercase__, lowercase__, lowercase__ )
accelerator.print('resumed checkpoint performance:', lowercase__ )
accelerator.print('resumed checkpoint\'s scheduler\'s lr:', lr_scheduler.get_lr()[0] )
accelerator.print('resumed optimizers\'s lr:', optimizer.param_groups[0]['lr'] )
with open(os.path.join(args.output_dir, F'''state_{starting_epoch-1}.json''' ), 'r' ) as f:
__lowercase =json.load(lowercase__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__lowercase ={}
for epoch in range(lowercase__, lowercase__ ):
model.train()
for step, batch in enumerate(lowercase__ ):
__lowercase =model(**lowercase__ )
__lowercase =outputs.loss
__lowercase =loss / gradient_accumulation_steps
accelerator.backward(lowercase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__lowercase =F'''epoch_{epoch}'''
__lowercase =os.path.join(args.output_dir, lowercase__ )
accelerator.save_state(lowercase__ )
__lowercase =evaluation_loop(lowercase__, lowercase__, lowercase__, lowercase__ )
__lowercase =accuracy
__lowercase =lr_scheduler.get_lr()[0]
__lowercase =optimizer.param_groups[0]['lr']
__lowercase =epoch
__lowercase =overall_step
accelerator.print(F'''epoch {epoch}:''', lowercase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir, F'''state_{epoch}.json''' ), 'w' ) as f:
json.dump(lowercase__, lowercase__ )
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path', type=lowercase__, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=lowercase__, )
parser.add_argument(
'--output_dir', type=lowercase__, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
parser.add_argument(
'--resume_from_checkpoint', type=lowercase__, default=lowercase__, help='If the training should continue from a checkpoint folder.', )
parser.add_argument(
'--partial_train_epoch', type=lowercase__, default=lowercase__, help='If passed, the training will stop after this number of epochs.', )
parser.add_argument(
'--num_epochs', type=lowercase__, default=2, help='Number of train epochs.', )
    args = parser.parse_args()
    config = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
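

# Illustrative sketch of how the resume logic above recovers the epoch number
# from a checkpoint folder name (the path is hypothetical):
def _epoch_from_checkpoint(resume_path: str) -> int:
    epoch_string = resume_path.split("epoch_")[1]
    state_epoch_num = ""
    for char in epoch_string:
        if char.isdigit():
            state_epoch_num += char
        else:
            break
    return int(state_epoch_num) + 1


assert _epoch_from_checkpoint("out/epoch_7") == 8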
| 141 |
import math


class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """
        Compute the winning vector by Euclidean distance.
        """
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        # the winner is the weight vector closest to the sample
        return 0 if d0 < d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """
        Move every component of the winning vector toward the sample.
        """
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
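

# One hand-worked update step for a winning vector (the numbers are illustrative):
# each component moves a fraction alpha of the way toward the sample.
w = [0.2, 0.6, 0.5, 0.9]
sample = [1, 1, 0, 0]
alpha = 0.5
updated = [wi + alpha * (si - wi) for wi, si in zip(w, sample)]
assert [round(x, 10) for x in updated] == [0.6, 0.8, 0.25, 0.45]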
| 141 | 1 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")

        # Copy the template and replace the generic ClassLabel with the dataset's own label feature.
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 351 |
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = logging.get_logger()
# the current default level is logging.WARNING
UpperCamelCase__ :List[Any] = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = logging.get_verbosity()
UpperCamelCase__ :Union[str, Any] = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
UpperCamelCase__ :Optional[Any] = '''Testing 1, 2, 3'''
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(UpperCamelCase_ ) as cl:
logger.warning(UpperCamelCase_ )
self.assertEqual(cl.out , msg + '''\n''' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(UpperCamelCase_ ) as cl:
logger.warning(UpperCamelCase_ )
self.assertEqual(cl.out , '''''' )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(UpperCamelCase_ ) as cl:
logger.warning(UpperCamelCase_ )
self.assertEqual(cl.out , msg + '''\n''' )
# restore to the original level
logging.set_verbosity(UpperCamelCase_ )
@mockenv(TRANSFORMERS_VERBOSITY='''error''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
UpperCamelCase__ :Tuple = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
UpperCamelCase__ :Any = os.getenv('''TRANSFORMERS_VERBOSITY''' , UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = logging.log_levels[env_level_str]
UpperCamelCase__ :int = logging.get_verbosity()
self.assertEqual(
UpperCamelCase_ , UpperCamelCase_ , F'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )
# restore to the original level
UpperCamelCase__ :Union[str, Any] = ''''''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='''super-error''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase__ :Dict = logging.logging.getLogger()
with CaptureLogger(UpperCamelCase_ ) as cl:
# this action activates the env var
logging.get_logger('''transformers.models.bart.tokenization_bart''' )
self.assertIn('''Unknown option TRANSFORMERS_VERBOSITY=super-error''' , cl.out )
# no need to restore as nothing was changed
def lowerCAmelCase__ ( self ):
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase__ :Optional[int] = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
UpperCamelCase__ :int = '''Testing 1, 2, 3'''
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''1''' ):
# nothing should be logged as env var disables this method
with CaptureLogger(UpperCamelCase_ ) as cl:
logger.warning_advice(UpperCamelCase_ )
self.assertEqual(cl.out , '''''' )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''''' ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(UpperCamelCase_ ) as cl:
logger.warning_advice(UpperCamelCase_ )
self.assertEqual(cl.out , msg + '''\n''' )
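

# Minimal usage sketch of the verbosity API exercised by the tests above (kept
# as a comment so the test module has no import-time side effects):
#
#   from transformers import logging
#
#   logging.set_verbosity_info()
#   logger = logging.get_logger("transformers")
#   logger.info("visible at INFO level")
#   logging.set_verbosity_error()  # silences warnings and info from here on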
def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
| 219 | 0 |
from random import randint, random


def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells

    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway[0])

    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells

        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)

    return highway


if __name__ == "__main__":
    import doctest

    doctest.testmod()
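
# Minimal simulation sketch (the parameters are illustrative):
#
#   highway = construct_highway(number_of_cells=10, frequency=3, initial_speed=1)
#   result = simulate(highway, number_of_update=2, probability=0.1, max_speed=5)
#   # result holds one list of speeds per time step; -1 marks an empty cell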
| 10 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]
@require_tf
@slow
def snake_case__ ( self ) -> int:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__UpperCAmelCase ,'tf' ,12 ,**__UpperCAmelCase )
@require_torch
@slow
def snake_case__ ( self ) -> int:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__UpperCAmelCase ,'pt' ,12 ,**__UpperCAmelCase )
@require_torch
@slow
def snake_case__ ( self ) -> Optional[Any]:
from transformers import BertModel
A__ = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(__UpperCAmelCase ) )
vocab_file.flush()
A__ = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
A__ = BertModel(BertConfig(vocab_size=len(__UpperCAmelCase ) ) )
model.save_pretrained(__UpperCAmelCase )
self._test_export(__UpperCAmelCase ,'pt' ,12 ,__UpperCAmelCase )
@require_tf
@slow
def snake_case__ ( self ) -> Optional[Any]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
A__ = self._test_export(__UpperCAmelCase ,'tf' ,12 ,**__UpperCAmelCase )
A__ = quantize(Path(__UpperCAmelCase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__UpperCAmelCase ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def snake_case__ ( self ) -> str:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
A__ = self._test_export(__UpperCAmelCase ,'pt' ,12 ,**__UpperCAmelCase )
A__ = quantize(__UpperCAmelCase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__UpperCAmelCase ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=None ,**__UpperCAmelCase ) -> Union[str, Any]:
try:
# Compute path
with TemporaryDirectory() as tempdir:
A__ = Path(__UpperCAmelCase ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase )
return path
except Exception as e:
self.fail(__UpperCAmelCase )
@require_torch
@require_tokenizers
@slow
def snake_case__ ( self ) -> Optional[Any]:
from transformers import BertModel
A__ = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
A__ = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(__UpperCAmelCase ,__UpperCAmelCase ,'pt' )
@require_tf
@require_tokenizers
@slow
def snake_case__ ( self ) -> Optional[Any]:
from transformers import TFBertModel
A__ = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
A__ = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(__UpperCAmelCase ,__UpperCAmelCase ,'tf' )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> str:
A__ = FeatureExtractionPipeline(__UpperCAmelCase ,__UpperCAmelCase )
A__ = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
A__ , A__ , A__ , A__ = infer_shapes(__UpperCAmelCase ,__UpperCAmelCase )
# Assert all variables are present
self.assertEqual(len(__UpperCAmelCase ) ,len(__UpperCAmelCase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] ,__UpperCAmelCase )
self.assertSequenceEqual(variable_names[3:] ,__UpperCAmelCase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] ,{0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'] ,{0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'] ,{0: 'batch'} )
def snake_case__ ( self ) -> Union[str, Any]:
A__ = ['input_ids', 'attention_mask', 'token_type_ids']
A__ = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
A__ , A__ = ensure_valid_input(FuncContiguousArgs() ,__UpperCAmelCase ,__UpperCAmelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__UpperCAmelCase ) ,3 )
# Should have exactly the same input names
self.assertEqual(set(__UpperCAmelCase ) ,set(__UpperCAmelCase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__UpperCAmelCase ,(tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
A__ , A__ = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCAmelCase ,__UpperCAmelCase )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(__UpperCAmelCase ) ,1 )
self.assertEqual(len(__UpperCAmelCase ) ,1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] ,tokens['input_ids'] )
self.assertEqual(ordered_input_names[0] ,'input_ids' )
def snake_case__ ( self ) -> Union[str, Any]:
A__ = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) ,'-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx' ,generated.as_posix() )
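# Usage sketch for the exporter exercised above (not part of the original test
# file; the output path is illustrative):
#
#     from pathlib import Path
#     from transformers.convert_graph_to_onnx import convert
#
#     convert("pt", "bert-base-cased", Path("/tmp/bert-base-cased.onnx"), 12)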
def is_balanced(s):
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
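# Doctest-style examples for the reconstructed is_balanced:
#     >>> is_balanced("([]{})")
#     True
#     >>> is_balanced("([)]")
#     False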
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '\\n@inproceedings{lin-2004-rouge,\n    title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n    author = "Lin, Chin-Yew",\n    booktitle = "Text Summarization Branches Out",\n    month = jul,\n    year = "2004",\n    address = "Barcelona, Spain",\n    publisher = "Association for Computational Linguistics",\n    url = "https://www.aclweb.org/anthology/W04-1013",\n    pages = "74--81",\n}\n'
_DESCRIPTION = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metric is a wrapper around the Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_KWARGS_DESCRIPTION = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    rouge_types: A list of rouge types to calculate.\n        Valid names:\n        `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n        `"rougeL"`: Longest common subsequence based scoring.\n        `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n        See details in https://github.com/huggingface/datasets/issues/617\n    use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n    use_aggregator: Return aggregates if this is set to True\nReturns:\n    rouge1: rouge_1 (precision, recall, f1),\n    rouge2: rouge_2 (precision, recall, f1),\n    rougeL: rouge_l (precision, recall, f1),\n    rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n    >>> rouge = datasets.load_metric(\'rouge\')\n    >>> predictions = ["hello there", "general kenobi"]\n    >>> references = ["hello there", "general kenobi"]\n    >>> results = rouge.compute(predictions=predictions, references=references)\n    >>> print(list(results.keys()))\n    [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n    >>> print(results["rouge1"])\n    AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n    >>> print(results["rouge1"].mid.fmeasure)\n    1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
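# Usage, as in the docstring above:
#     rouge = datasets.load_metric("rouge")
#     results = rouge.compute(predictions=["hello there"], references=["hello there"])
#     print(results["rouge1"].mid.fmeasure)  # 1.0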
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear module with a LoRA-style low-rank adapter; only the adapter is trained."""

    def __init__(self, module, rank):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fpaa
        del self.model_abit

        gc.collect()
        torch.cuda.empty_cache()
def a ( self : Optional[int] ) -> Optional[Any]:
__lowerCAmelCase = self.model_abit.config
self.assertTrue(hasattr(UpperCamelCase__ , """quantization_config""" ) )
__lowerCAmelCase = config.to_dict()
__lowerCAmelCase = config.to_diff_dict()
__lowerCAmelCase = config.to_json_string()
def a ( self : Tuple ) -> Any:
from bitsandbytes.nn import Paramsabit
__lowerCAmelCase = self.model_fpaa.get_memory_footprint()
__lowerCAmelCase = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__lowerCAmelCase = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def a ( self : str ) -> List[Any]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(UpperCamelCase__ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def a ( self : Optional[Any] ) -> List[Any]:
__lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="""pt""" )
__lowerCAmelCase = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__ ) , self.EXPECTED_OUTPUTS )
def a ( self : Dict ) -> Any:
__lowerCAmelCase = BitsAndBytesConfig()
__lowerCAmelCase = True
__lowerCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=UpperCamelCase__ , device_map="""auto""" )
__lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="""pt""" )
__lowerCAmelCase = model_abit_from_config.generate(
input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__ ) , self.EXPECTED_OUTPUTS )
def a ( self : str ) -> str:
with self.assertRaises(UpperCamelCase__ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(UpperCamelCase__ )
def a ( self : Tuple ) -> Any:
__lowerCAmelCase = BitsAndBytesConfig()
with self.assertRaises(UpperCamelCase__ ):
__lowerCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=UpperCamelCase__ , load_in_abit=UpperCamelCase__ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
def a ( self : Union[str, Any] ) -> Optional[int]:
with self.assertRaises(UpperCamelCase__ ):
# Tries with `str`
self.model_abit.to("""cpu""" )
with self.assertRaises(UpperCamelCase__ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(UpperCamelCase__ ):
# Tries with a `device`
self.model_abit.to(torch.device("""cuda:0""" ) )
with self.assertRaises(UpperCamelCase__ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(UpperCamelCase__ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="""pt""" )
__lowerCAmelCase = self.model_fpaa.to(torch.floataa )
__lowerCAmelCase = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__lowerCAmelCase = self.model_fpaa.to("""cpu""" )
# Check this does not throw an error
__lowerCAmelCase = self.model_fpaa.half()
# Check this does not throw an error
__lowerCAmelCase = self.model_fpaa.float()
def a ( self : Optional[int] ) -> Optional[Any]:
__lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=UpperCamelCase__ , device_map="""auto""" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()
def a ( self : Dict ) -> Optional[int]:
from transformers import TaForConditionalGeneration
__lowerCAmelCase = TaForConditionalGeneration._keep_in_fpaa_modules
__lowerCAmelCase = None
# test with `t5-small`
__lowerCAmelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""" )
__lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
__lowerCAmelCase = model.generate(**UpperCamelCase__ )
# test with `flan-t5-small`
__lowerCAmelCase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""" )
__lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
__lowerCAmelCase = model.generate(**UpperCamelCase__ )
__lowerCAmelCase = modules
def a ( self : str ) -> int:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__lowerCAmelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
__lowerCAmelCase = model.generate(**UpperCamelCase__ )
# test with `flan-t5-small`
__lowerCAmelCase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""" )
__lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
__lowerCAmelCase = model.generate(**UpperCamelCase__ )
class Bnb4BitModelClassesTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()
def a ( self : Optional[int] ) -> int:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)
        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
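# Minimal 4-bit loading sketch matching the tests above (requires a CUDA GPU,
# bitsandbytes and accelerate; the checkpoint choice is illustrative):
#
#     from transformers import AutoModelForCausalLM
#
#     model = AutoModelForCausalLM.from_pretrained(
#         "bigscience/bloom-560m", load_in_4bit=True, device_map="auto"
#     )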
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"


# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite=False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite=False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
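# CLI usage (assuming the script lives at utils/sort_auto_mappings.py, as in transformers):
#     python utils/sort_auto_mappings.py               # sort mappings in place
#     python utils/sort_auto_mappings.py --check_only  # only report files needing sorting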
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    """Use Pollard's rho algorithm to return a nontrivial factor of ``num``, or None."""
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None
if __name__ == "__main__":
import argparse
__A = argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
__A = parser.parse_args()
__A = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"""{args.num} is probably prime""")
else:
__A = args.num // divisor
print(F"""{args.num} = {divisor} * {quotient}""") | 365 | """simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Return the id of the last completed scheduled (daily) CI workflow run."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download and read the contents of the requested artifacts of the last completed daily CI run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
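# Usage sketch (the artifact name below is hypothetical; `token` is a GitHub API token):
#     results = get_last_daily_ci_reports(
#         artifact_names=["run_all_tests_gpu_test_reports"], output_dir="ci_results", token=token
#     )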
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError("Given three sides do not form a triangle")
    # Heron's formula: area = sqrt(s * (s - a) * (s - b) * (s - c)), with s the semi-perimeter
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or "
            "equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as "
            "length of a side"
        )
    return (sides * length**2) / (4 * tan(pi / sides))
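# Worked example for Heron's formula above: a 3-4-5 right triangle has
# semi-perimeter s = 6 and area sqrt(6 * 3 * 2 * 1) = 6.0, so
#     >>> area_triangle_three_sides(3, 4, 5)
#     6.0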
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(F'''Rectangle: {area_rectangle(10, 20) = }''')
print(F'''Square: {area_square(10) = }''')
print(F'''Triangle: {area_triangle(10, 10) = }''')
print(F'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(F'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(F'''Rhombus: {area_rhombus(10, 20) = }''')
print(F'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(F'''Circle: {area_circle(20) = }''')
print(F'''Ellipse: {area_ellipse(10, 20) = }''')
print('''\nSurface Areas of various geometric shapes: \n''')
print(F'''Cube: {surface_area_cube(20) = }''')
print(F'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(F'''Sphere: {surface_area_sphere(20) = }''')
print(F'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(F'''Cone: {surface_area_cone(10, 20) = }''')
print(F'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(F'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(F'''Torus: {surface_area_torus(20, 10) = }''')
print(F'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(F'''Square: {area_reg_polygon(4, 10) = }''')
    print(F'''Regular Pentagon: {area_reg_polygon(5, 10) = }''')
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system matrix * x = vector by Gaussian elimination."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]

        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_points: list[int]) -> Callable[[int], int]:
    """Return a polynomial (as a function) interpolating the given y values at x = 1, 2, ..."""
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms of the optimum polynomials (Project Euler 101)."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1

        ret += poly(x_val)

    return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
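# Sanity check for the Gaussian-elimination helper (not part of the original file):
#     >>> solve([[1, 2], [3, 4]], [[5], [6]])
#     [[-4.0], [4.5]]
# i.e. the unique solution of x + 2y = 5 and 3x + 4y = 6.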
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__A = logging.get_logger(__name__)
class ImageProcessor(BaseImageProcessor):
    # NOTE: the original, model-specific class name was lost in obfuscation.

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"""Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}""")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
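# Usage sketch (the class name was reconstructed; `pil_image` stands for any PIL image):
#     processor = ImageProcessor()
#     batch = processor(images=pil_image, return_tensors="pt")
#     # batch["pixel_values"] has shape (1, 3, 224, 224) with the default 224x224 crop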
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the affinely transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the affinely transformed distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation of the affinely transformed distribution."""
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]

        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        # squareplus(x) = (x + sqrt(x**2 + 4)) / 2, a smooth map onto the positive reals
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
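# Usage sketch (class and method names reconstructed from transformers'
# time-series utilities; `hidden` is any (batch, in_features) float tensor):
#     output = StudentTOutput(dim=1)
#     projection = output.get_parameter_projection(in_features=32)
#     distr = output.distribution(projection(hidden))
#     sample = distr.sample()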
'''simple docstring'''
def greatest_common_divisor(a: int, b: int) -> int:
    """Calculate the Greatest Common Divisor (GCD) recursively (Euclidean algorithm)."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Calculate the GCD iteratively."""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main():
    """Call GCD methods on user input."""
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
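# Doctest-style examples:
#     >>> greatest_common_divisor(24, 40)
#     8
#     >>> gcd_by_iterative(24, 40)
#     8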
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
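# CLI usage via python-fire (sketch; the data_dir must contain train/val files
# in the layout expected by Seq2SeqDataset):
#     python save_len_file.py --tokenizer_name t5-small --data_dir ./wmt_en_ro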
"""simple docstring"""
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    """Strand sort: repeatedly strip an ordered "strand" from the input and merge it into the solution."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 318 |
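# Worked trace (illustrative): sorting [4, 3, 5, 1, 2] first pulls out the strand
# [4, 5], merges it into the solution, then recurses on the remainder [3, 1, 2].
example = [4, 3, 5, 1, 2]
assert strand_sort(list(example)) == [1, 2, 3, 4, 5]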
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    '''Wraps a SpeechT5 feature extractor and a SpeechT5 tokenizer into a single processor.'''
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
    def __call__(self, *args, **kwargs):
        '''Route `audio`/`text` inputs and `audio_target`/`text_target` labels to the right sub-processor.'''
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?")
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?")
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None
        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def pad(self, *args, **kwargs):
        '''Pad `input_values`/`input_ids` and, when given, `labels` to a common batched length.'''
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)
        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.")
        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None
        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def batch_decode(self, *args, **kwargs):
        '''Forward to SpeechT5Tokenizer.batch_decode.'''
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        '''Forward to SpeechT5Tokenizer.decode.'''
        return self.tokenizer.decode(*args, **kwargs)
| 318 | 1 |
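# Usage sketch (illustrative; the checkpoint name is an assumption, not taken from the
# file above). `text` is routed to the tokenizer, `audio` to the feature extractor.
from transformers import SpeechT5Processor
processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
inputs = processor(text="Hello, my dog is cute", return_tensors="pt")
print(inputs["input_ids"].shape)  # token ids produced by the SpeechT5 tokenizer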
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
_CITATION = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def intersect_and_union(pred_label, label, num_labels, ignore_index: bool, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False):
    """Calculate intersection and union areas between one prediction and its ground-truth map."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index: bool, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False):
    """Accumulate intersection and union areas over a list of (prediction, ground truth) pairs."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels)
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index: bool, nan_to_num: Optional[int] = None, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False):
    """Compute mean IoU, mean accuracy, and overall accuracy, plus the per-category variants."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels)
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class MeanIoU(datasets.Metric):
    """Mean Intersection-over-Union metric."""
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }), reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ], )
    def _compute(self, predictions, references, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
        iou_result = mean_iou(
            results=predictions, gt_seg_maps=references, num_labels=num_labels, ignore_index=ignore_index, nan_to_num=nan_to_num, label_map=label_map, reduce_labels=reduce_labels, )
        return iou_result
| 259 |
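# Small direct call (illustrative, relying on the definitions above): two 2x2 maps
# with ten possible labels. Categories that never occur divide 0/0 and come out NaN.
pred = np.array([[1, 2], [3, 4]])
truth = np.array([[1, 2], [3, 3]])
intersect, union, _, _ = intersect_and_union(pred, truth, num_labels=10, ignore_index=255)
print(intersect / union)  # per-category IoU: 1.0 for labels 1 and 2, 0.5 for label 3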
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)
def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """Print every subsequence by branching on excluding, then including, each element."""
    if index == len(sequence):
        print(current_subsequence)
        return
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
__snake_case = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["""A""", """B""", """C"""])
generate_all_subsequences(seq)
| 259 | 1 |
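# Expected output (illustrative note): the exclude branch runs before the include
# branch, so for ["A", "B", "C"] the print order is [], ['C'], ['B'], ['B', 'C'],
# ['A'], ['A', 'C'], ['A', 'B'], ['A', 'B', 'C'].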
import math
def main() -> None:
    """Prompt for a message and key, then encrypt or decrypt with a columnar transposition cipher."""
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")
    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")
def encrypt_message(key: int, message: str) -> str:
    """Read the message column by column: column i collects every key-th character starting at i."""
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)
def decrypt_message(key: int, message: str) -> str:
    """Rebuild the original text by writing the ciphertext back into the transposition grid."""
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 267 |
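# Worked example (illustrative): with key 8 the plaintext is written into 8 columns
# and read out column by column; decryption inverts the grid walk exactly.
assert encrypt_message(8, "Common sense is not so common.") == "Cenoonommstmme oo snnio. s s c"
assert decrypt_message(8, "Cenoonommstmme oo snnio. s s c") == "Common sense is not so common."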
def find_minimum_change(denominations, value) -> list[int]:
    """Greedy change-making: repeatedly subtract the largest denomination that still fits."""
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array
    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"
    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()
    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ") | 267 | 1 |
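# Worked example (illustrative): greedy change for 987 using the default Indian
# denominations; 987 = 500 + 4x100 + 50 + 20 + 10 + 5 + 2.
assert find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987") == [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]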
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    """Derive a SwinConfig from a timm checkpoint name such as swin_tiny_patch4_window7_224."""
    config = SwinConfig()
    name_split = swin_name.split("_")
    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "in22k" in swin_name:
        num_classes = 21_841
    else:
        num_classes = 1_000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key(name):
    """Map a timm parameter name onto the corresponding transformers Swin parameter name."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name
    return name
def convert_state_dict(orig_state_dict, model):
    """Rename keys and split fused qkv matrices; target key strings follow the upstream conversion script."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    """Convert a timm Swin checkpoint and verify logits against the original model."""
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()
    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)
    print(f"""Saving model {swin_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 43 |
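# Hypothetical invocation (illustrative; the dump path is a placeholder, not from the
# script above):
#   python convert_swin_timm_to_pytorch.py \
#       --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224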
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}), supervised_keys=None, )
    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]
    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with a nested feature."""
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}), supervised_keys=None, )
    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]
    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]
def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam
        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow")))
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)
    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")))
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
| 298 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests the BlenderbotSmall BPE tokenizer against a tiny hand-built vocabulary."""
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text
    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."
    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 360 |
"""simple docstring"""
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    """Checks that every backend named by a dummy object appears in the deps table."""
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False
    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps
        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
| 255 | 0 |
"""simple docstring"""
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    """Convert a roman numeral string to an integer (subtractive pairs via look-ahead)."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    """Write an integer in minimal roman numeral form."""
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Project Euler 89: characters saved by rewriting each numeral in minimal form."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)
    return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
| 173 |
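# Round-trip check (illustrative): parsing then regenerating canonicalizes a numeral.
assert parse_roman_numerals("XIIIIII") == 16  # non-minimal 7-character form
assert generate_roman_numerals(16) == "XVI"   # 4 characters saved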
"""simple docstring"""
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False)
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)
    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
| 173 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 371 |
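# Illustrative note on the lazy-module pattern above: replacing the module in
# sys.modules means submodules are only imported on first attribute access, e.g.
# `from transformers import DistilBertConfig` triggers the real import of
# configuration_distilbert at that moment, keeping `import transformers` cheap.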
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }
def pt_to_pil(images):
    """Convert a [-1, 1] torch image batch (NCHW) to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images
def numpy_to_pil(images):
    """Convert a numpy image batch with values in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
| 346 | 0 |
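# Usage sketch (illustrative; assumes torch is installed): convert a random
# [-1, 1] tensor batch to PIL images with the helpers above.
import torch
sample = torch.rand(2, 3, 8, 8) * 2 - 1  # values in [-1, 1], NCHW layout
pils = pt_to_pil(sample)
print(len(pils), pils[0].size)  # 2 images of size (8, 8)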
"""simple docstring"""
from math import pow
def backtrack(needed_sum, power, current_number, current_sum, solutions_count):
    """Recursively try adding current_number**power to the running sum and count exact matches."""
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count
    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count)
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count)
    return current_sum, solutions_count
def solve(needed_sum, power):
    """Count the ways needed_sum can be written as a sum of distinct naturals raised to power."""
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10.")
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 100 |
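# Worked example (illustrative): 13 can be written as a sum of distinct squares in
# exactly one way (2**2 + 3**2), as can 10 (1**2 + 3**2).
assert solve(13, 2) == 1
assert solve(10, 2) == 1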
"""simple docstring"""
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the product of parts for every unique prime partition of number_to_partition."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}
    ret: set[int] = set()
    prime: int
    sub: int
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)
    return ret
def solution(number_unique_partitions: int = 5000) -> int | None:
    """Project Euler 77: first value writable as a sum of primes in more than the given number of ways."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(F"{solution() = }")
| 108 | 0 |
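# Small check (illustrative): 7 has three prime partitions (7, 5+2, 3+2+2), and each
# is encoded by the product of its parts.
assert partition(7) == {7, 10, 12}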
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 8 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, )
    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_training(self):
        pass
    def test_training_gradient_checkpointing(self):
        pass
    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass
    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys) | 8 | 1 |
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """Project Euler 115: least n for which the fill-count function F(min_block_length, n) exceeds one million."""
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n
if __name__ == "__main__":
print(F'{solution() = }') | 282 |
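# Illustrative check (values taken from the Project Euler 115 statement, where
# F(3, 29) = 673135 and F(3, 30) = 1097609; the published answer for the default
# min_block_length of 50 is 168):
assert solution(3) == 30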
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Divide-and-conquer maximum subarray: best of left half, right half, and crossing span."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum
def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    """Best subarray that crosses the midpoint: expand left from mid, then right from mid + 1."""
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1
    summ = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)
def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start
def plot_runtimes() -> None:
    input_sizes = [10, 100, 1_000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod() | 282 | 1 |
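# Illustrative example: the classic CLRS-style array; the maximum subarray is
# arr[3:7] = [4, -1, 2, 1] with sum 6.
arr = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
low, high, best = max_subarray(arr, 0, len(arr) - 1)
assert (low, high, best) == (3, 6, 6)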
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
UpperCamelCase__ = {
"""susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""",
"""susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""",
}
class ErnieMConfig(PretrainedConfig):
    """Configuration for the ErnieM model; defaults mirror susnato/ernie-m-base_pytorch."""
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
| 102 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class a__ :
def __init__( self , _A , _A=1_4 , _A=7 , _A=True , _A=True , _A=False , _A=True , _A=9_9 , _A=3_2 , _A=4 , _A=4 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_1_2 , _A=0.02 , ):
"""simple docstring"""
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_input_mask
__lowerCAmelCase = use_token_type_ids
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = rotary_dim
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = initializer_range
__lowerCAmelCase = None
__lowerCAmelCase = vocab_size - 1
__lowerCAmelCase = vocab_size - 1
__lowerCAmelCase = vocab_size - 1
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase = None
if self.use_input_mask:
__lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=_A , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , input_ids , attention_mask ):
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config )
        past_key_values = model.init_cache(input_ids.shape[0] , max_decoder_length )
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="i4" )
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
        outputs_cache = model(
            input_ids[:, :-1] , attention_mask=attention_mask , past_key_values=past_key_values , position_ids=position_ids , )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model(
            input_ids[:, -1:] , attention_mask=attention_mask , past_key_values=outputs_cache.past_key_values , position_ids=position_ids , )
        outputs = model(input_ids )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
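    # Same cached-decoding equivalence check as above, but with an explicitly padded attention mask.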
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , input_ids , attention_mask ):
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config )
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
        past_key_values = model.init_cache(input_ids.shape[0] , max_decoder_length )
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
        outputs_cache = model(
            input_ids[:, :-1] , attention_mask=attention_mask_cache , past_key_values=past_key_values , position_ids=position_ids , )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model(
            input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=attention_mask_cache , position_ids=position_ids , )
        outputs = model(input_ids , attention_mask=attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class FlaxGPTJModelTest( FlaxModelTesterMixin , FlaxGenerationTesterMixin , unittest.TestCase ):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp( self ):
        """simple docstring"""
        self.model_tester = FlaxGPTJModelTester(self )
    def test_use_cache_forward( self ):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            config , input_ids , attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name , config , input_ids , attention_mask )
    def test_use_cache_forward_with_attn_mask( self ):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            config , input_ids , attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name , config , input_ids , attention_mask )
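    # End-to-end greedy-generation smoke test against the public EleutherAI/gpt-j-6B checkpoint.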
@tooslow
    def test_batch_generation( self ):
        """simple docstring"""
        tokenizer = GPTaTokenizer.from_pretrained("gpt2" , pad_token="<|endoftext|>" , padding_side="left" )
        inputs = tokenizer(["Hello this is a long string", "Hey"] , return_tensors="np" , padding=True , truncation=True )
        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B" )
        model.config.do_sample = False  # force greedy decoding
        model.config.pad_token_id = model.config.eos_token_id
        jit_generate = jax.jit(model.generate )
        output_sequences = jit_generate(
            inputs["input_ids"] , attention_mask=inputs["attention_mask"] , pad_token_id=tokenizer.pad_token_id ).sequences
        output_string = tokenizer.batch_decode(output_sequences , skip_special_tokens=True )
        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]
        self.assertListEqual(output_string , expected_string )
@is_pt_flax_cross_test
    def test_equivalence_pt_to_flax( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCAmelCase = self._prepare_for_class(_A , _A )
__lowerCAmelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCAmelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCAmelCase = getattr(_A , _A )
__lowerCAmelCase , __lowerCAmelCase = pt_inputs["input_ids"].shape
__lowerCAmelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_A ):
__lowerCAmelCase = 0
__lowerCAmelCase = 1
__lowerCAmelCase = 0
__lowerCAmelCase = 1
__lowerCAmelCase = pt_model_class(_A ).eval()
                __lowerCAmelCase = model_class(_A , dtype=jnp.float32 )
__lowerCAmelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _A )
__lowerCAmelCase = fx_state
with torch.no_grad():
__lowerCAmelCase = pt_model(**_A ).to_tuple()
__lowerCAmelCase = fx_model(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(_A , _A ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_A )
__lowerCAmelCase = model_class.from_pretrained(_A , from_pt=_A )
__lowerCAmelCase = fx_model_loaded(**_A ).to_tuple()
self.assertEqual(
len(_A ) , len(_A ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(_A , _A ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
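    # Reverse direction: the next test ports the Flax weights into PyTorch and compares outputs again.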
@is_pt_flax_cross_test
    def test_equivalence_flax_to_pt( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCAmelCase = self._prepare_for_class(_A , _A )
__lowerCAmelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCAmelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCAmelCase = getattr(_A , _A )
__lowerCAmelCase = pt_model_class(_A ).eval()
                __lowerCAmelCase = model_class(_A , dtype=jnp.float32 )
__lowerCAmelCase = load_flax_weights_in_pytorch_model(_A , fx_model.params )
__lowerCAmelCase , __lowerCAmelCase = pt_inputs["input_ids"].shape
__lowerCAmelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_A ):
__lowerCAmelCase = 0
__lowerCAmelCase = 1
__lowerCAmelCase = 0
__lowerCAmelCase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__lowerCAmelCase = pt_model(**_A ).to_tuple()
__lowerCAmelCase = fx_model(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(_A , _A ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_A )
__lowerCAmelCase = pt_model_class.from_pretrained(_A , from_flax=_A )
with torch.no_grad():
__lowerCAmelCase = pt_model_loaded(**_A ).to_tuple()
self.assertEqual(
len(_A ) , len(_A ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(_A , _A ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
    def test_model_from_pretrained( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
__lowerCAmelCase = model_class_name.from_pretrained("EleutherAI/gpt-j-6B" )
__lowerCAmelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(_A )
| 102 | 1 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = 'src/diffusers'
REPO_PATH = '.'
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    # Stay inside the copied object as long as lines are indented, empty, or closing parentheses.
    return line.startswith(indent) or len(line) <= 1 or re.search(r'^\s*\)(\s*->.*:|:)\s*$', line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name` inside the diffusers module tree."""
    parts = object_name.split('.')
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f'{module}.py')):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f'`object_name` should begin with the name of a module of diffusers but got {object_name}.')
    with open(os.path.join(DIFFUSERS_PATH, f'{module}.py'), 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ''
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf'^{indent}(class|def)\s+{name}(\(|\:)', lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines):
        raise ValueError(f' {object_name} does not match any function or class in {module}.')
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
_re_replace_pattern = re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
_re_fill_pattern = re.compile(R'<FILL\s+[^>]*>')
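# Illustrative lines these regexes are designed to match (the object names are hypothetical):
#   _re_copy_warning:     "# Copied from diffusers.models.attention.Attention"
#                         "# Copied from diffusers.models.attention.Attention with Attention->CrossAttention"
#   _re_replace_pattern:  "Attention->CrossAttention all-casing"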
def get_indent(code):
    lines = code.split('\n')
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r'^(\s*)\S', lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Apply black formatting to a snippet, temporarily wrapping indented snippets in a dummy class."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f'class Bla:\n{code}'
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len('class Bla:\n') :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """Check that the code commented as a copy in `filename` matches the original; return diffs or overwrite."""
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f'^{indent}# End copy', line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = ''.join(observed_code_lines)
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split('\n') if _re_copy_warning.search(line) is None]
        theoretical_code = '\n'.join(theoretical_code)
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace('with', '').split(',')
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(F'Detected changes, rewriting {filename}.')
        with open(filename, 'w', encoding='utf-8', newline='\n') as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, '**/*.py'), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [F'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = '\n'.join(diffs)
        raise Exception(
            'Found the following copy inconsistencies:\n'
            + diff
            + '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
    check_copies(args.fix_and_overwrite)
| 62 |
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)
class SageMakerTrainer( Trainer ):
    def __init__( self , args=None , **kwargs ):
        """simple docstring"""
        warnings.warn(
            """`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
            """instead.""" , FutureWarning , )
        super().__init__(args=args , **kwargs )
| 219 | 0 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
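# Tester that builds a small random BigBird config (block-sparse attention by default) and inputs.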
class FlaxBigBirdModelTester( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_=2 , lowercase_=5_6 , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=9_9 , lowercase_=3_2 , lowercase_=2 , lowercase_=2 , lowercase_=7 , lowercase_="gelu_new" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_6 , lowercase_=2 , lowercase_=0.02 , lowercase_=4 , lowercase_="block_sparse" , lowercase_=True , lowercase_=False , lowercase_=2 , lowercase_=3 , ) -> str:
'''simple docstring'''
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = seq_length
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_attention_mask
lowerCAmelCase_ = use_token_type_ids
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = type_vocab_size
lowerCAmelCase_ = type_sequence_label_size
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = num_choices
lowerCAmelCase_ = rescale_embeddings
lowerCAmelCase_ = attention_type
lowerCAmelCase_ = use_bias
lowerCAmelCase_ = block_size
lowerCAmelCase_ = num_random_blocks
    def prepare_config_and_inputs( self ) -> Any:
'''simple docstring'''
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ = None
if self.use_attention_mask:
lowerCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ = None
if self.use_token_type_ids:
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase_ = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = config_and_inputs
lowerCAmelCase_ = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'attention_mask': attention_mask,
}
return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest( FlaxModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
    test_attn_probs = False
    test_mismatched_shapes = False
    def setUp( self ) -> Tuple:
        '''simple docstring'''
        self.model_tester = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained( self ) -> Any:
'''simple docstring'''
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init( self ) -> Dict:
'''simple docstring'''
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init( self ) -> Tuple:
'''simple docstring'''
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output( self ) -> Optional[Any]:
'''simple docstring'''
super().test_hidden_states_output()
@slow
    def test_model_from_pretrained( self ) -> Union[str, Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowerCAmelCase_ = model_class_name.from_pretrained('google/bigbird-roberta-base' )
self.assertIsNotNone(lowercase_ )
    def test_attention_outputs( self ) -> Union[str, Any]:
'''simple docstring'''
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation( self ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase_ = self._prepare_for_class(lowercase_ , lowercase_ )
lowerCAmelCase_ = model_class(lowercase_ )
@jax.jit
def model_jitted(lowercase_ , lowercase_=None , **lowercase_ ):
return model(input_ids=lowercase_ , attention_mask=lowercase_ , **lowercase_ )
with self.subTest('JIT Enabled' ):
lowerCAmelCase_ = model_jitted(**lowercase_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
lowerCAmelCase_ = model_jitted(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
for jitted_output, output in zip(lowercase_ , lowercase_ ):
self.assertEqual(jitted_output.shape , output.shape )
    def check_pt_flax_outputs( self , fx_outputs , pt_outputs , model_class , tol=1e-5 , name="outputs" , attributes=None ) -> Optional[int]:
        '''simple docstring'''
        if name.startswith('outputs.attentions' ):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs , pt_outputs , model_class , tol , name , attributes )
| 14 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)
class PoolFormerFeatureExtractor( PoolFormerImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ) -> None:
        '''simple docstring'''
        warnings.warn(
            'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PoolFormerImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 14 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
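# Build a Swin v2 config from a timm checkpoint name such as "swinv2_tiny_patch4_window8_256":
# the size variant, window size, image size and label set are all parsed out of the name.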
def get_swinva_config( swinva_name ) -> Optional[Any]:
"""simple docstring"""
__A = SwinvaConfig()
__A = swinva_name.split("_" )
__A = name_split[1]
if "to" in name_split[3]:
__A = int(name_split[3][-3:] )
else:
__A = int(name_split[3] )
if "to" in name_split[2]:
__A = int(name_split[2][-2:] )
else:
__A = int(name_split[2][6:] )
if model_size == "tiny":
__A = 9_6
__A = (2, 2, 6, 2)
__A = (3, 6, 1_2, 2_4)
elif model_size == "small":
__A = 9_6
__A = (2, 2, 1_8, 2)
__A = (3, 6, 1_2, 2_4)
elif model_size == "base":
__A = 1_2_8
__A = (2, 2, 1_8, 2)
__A = (4, 8, 1_6, 3_2)
else:
__A = 1_9_2
__A = (2, 2, 1_8, 2)
__A = (6, 1_2, 2_4, 4_8)
if "to" in swinva_name:
__A = (1_2, 1_2, 1_2, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
__A = 2_1_8_4_1
__A = "huggingface/label-files"
__A = "imagenet-22k-id2label.json"
__A = json.load(open(hf_hub_download(a_ , a_ , repo_type="dataset" ) , "r" ) )
__A = {int(a_ ): v for k, v in idalabel.items()}
__A = idalabel
__A = {v: k for k, v in idalabel.items()}
else:
__A = 1_0_0_0
__A = "huggingface/label-files"
__A = "imagenet-1k-id2label.json"
__A = json.load(open(hf_hub_download(a_ , a_ , repo_type="dataset" ) , "r" ) )
__A = {int(a_ ): v for k, v in idalabel.items()}
__A = idalabel
__A = {v: k for k, v in idalabel.items()}
__A = img_size
__A = num_classes
__A = embed_dim
__A = depths
__A = num_heads
__A = window_size
return config
def rename_key( name ) -> Optional[Any]:
"""simple docstring"""
if "patch_embed.proj" in name:
__A = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
__A = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
__A = "encoder." + name
if "attn.proj" in name:
__A = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
__A = name.replace("attn" , "attention.self" )
if "norm1" in name:
__A = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
__A = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
__A = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
__A = name.replace("mlp.fc2" , "output.dense" )
if "q_bias" in name:
__A = name.replace("q_bias" , "query.bias" )
if "k_bias" in name:
__A = name.replace("k_bias" , "key.bias" )
if "v_bias" in name:
__A = name.replace("v_bias" , "value.bias" )
if "cpb_mlp" in name:
__A = name.replace("cpb_mlp" , "continuous_position_bias_mlp" )
if name == "norm.weight":
__A = "layernorm.weight"
if name == "norm.bias":
__A = "layernorm.bias"
if "head" in name:
__A = name.replace("head" , "classifier" )
else:
__A = "swinv2." + name
return name
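# Remap timm state-dict keys to the Hugging Face layout; fused qkv projection weights and
# biases are split into separate query/key/value tensors below.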
def convert_state_dict( orig_state_dict , model ) -> List[str]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__A = orig_state_dict.pop(a_ )
if "mask" in key:
continue
elif "qkv" in key:
__A = key.split("." )
__A = int(key_split[1] )
__A = int(key_split[3] )
__A = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__A = val[:dim, :]
__A = val[dim : dim * 2, :]
__A = val[-dim:, :]
else:
__A = val[:dim]
__A = val[
dim : dim * 2
]
__A = val[-dim:]
else:
__A = val
return orig_state_dict
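# Port a timm Swin v2 checkpoint into the converted model and verify that both implementations
# agree on a sample COCO image before saving (and pushing) the result.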
def convert_swinva_checkpoint( swinva_name , pytorch_dump_folder_path ) -> Tuple:
"""simple docstring"""
__A = timm.create_model(a_ , pretrained=a_ )
timm_model.eval()
__A = get_swinva_config(a_ )
__A = SwinvaForImageClassification(a_ )
model.eval()
__A = convert_state_dict(timm_model.state_dict() , a_ )
model.load_state_dict(a_ )
__A = "http://images.cocodataset.org/val2017/000000039769.jpg"
__A = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_" , "-" ) ) )
__A = Image.open(requests.get(a_ , stream=a_ ).raw )
__A = image_processor(images=a_ , return_tensors="pt" )
__A = timm_model(inputs["pixel_values"] )
__A = model(**a_ ).logits
assert torch.allclose(a_ , a_ , atol=1E-3 )
print(F'''Saving model {swinva_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(a_ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(a_ )
model.push_to_hub(
repo_path_or_name=Path(a_ , a_ ) , organization="nandwalritik" , commit_message="Add model" , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swinv2_name',
default='swinv2_tiny_patch4_window8_256',
type=str,
help='Name of the Swinv2 timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE :List[Any] = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 15 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester( unittest.TestCase ):
def __init__( self : List[str] , _A : List[Any] , _A : bool = True , _A : Dict[str, int] = None , _A : int = 32 , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : bool = True , _A : Optional[Union[float, List[float]]] = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , _A : Optional[Union[float, List[float]]] = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , _A : bool = True , _A : Tuple=7 , _A : Tuple=30 , _A : int=400 , _A : Tuple=3 , ) -> Optional[int]:
"""simple docstring"""
snake_case_ : str = parent
snake_case_ : str = do_resize
snake_case_ : str = size if size is not None else {'shortest_edge': 288}
snake_case_ : Any = size_divisor
snake_case_ : Any = do_rescale
snake_case_ : Union[str, Any] = rescale_factor
snake_case_ : str = do_normalize
snake_case_ : int = do_center_crop
snake_case_ : str = image_mean
snake_case_ : int = image_std
snake_case_ : Any = do_pad
snake_case_ : Optional[int] = batch_size
snake_case_ : List[str] = num_channels
snake_case_ : Any = min_resolution
snake_case_ : str = max_resolution
    def prepare_image_processor_dict( self ) -> Optional[int]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
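    # Expected (height, width) after preprocessing: scale so the short side reaches `shortest_edge`,
    # cap the long side at (1333 / 800) * size, then floor both sides to a multiple of `size_divisor`.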
    def get_expected_values( self , image_inputs , batched=False ) -> int:
"""simple docstring"""
if not batched:
snake_case_ : Optional[int] = self.size['shortest_edge']
snake_case_ : List[Any] = image_inputs[0]
if isinstance(_A , Image.Image ):
snake_case_ ,snake_case_ : Optional[Any] = image.size
else:
snake_case_ ,snake_case_ : str = image.shape[1], image.shape[2]
snake_case_ : Dict = size / min(_A , _A )
if h < w:
snake_case_ ,snake_case_ : str = size, scale * w
else:
snake_case_ ,snake_case_ : Tuple = scale * h, size
snake_case_ : Dict = int((1333 / 800) * size )
if max(_A , _A ) > max_size:
snake_case_ : Union[str, Any] = max_size / max(_A , _A )
snake_case_ : Any = newh * scale
snake_case_ : Union[str, Any] = neww * scale
snake_case_ ,snake_case_ : Any = int(newh + 0.5 ), int(neww + 0.5 )
snake_case_ ,snake_case_ : int = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
snake_case_ : Optional[int] = []
for image in image_inputs:
snake_case_ ,snake_case_ : Optional[int] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case_ : str = max(_A , key=lambda _A : item[0] )[0]
snake_case_ : List[str] = max(_A , key=lambda _A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None
    def setUp( self ) -> Tuple:
        """simple docstring"""
        self.image_processor_tester = BridgeTowerImageProcessingTester(self )
@property
    def image_processor_dict( self ) -> Any:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) -> Dict:
"""simple docstring"""
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , 'image_mean' ) )
self.assertTrue(hasattr(_A , 'image_std' ) )
self.assertTrue(hasattr(_A , 'do_normalize' ) )
self.assertTrue(hasattr(_A , 'do_resize' ) )
self.assertTrue(hasattr(_A , 'size' ) )
self.assertTrue(hasattr(_A , 'size_divisor' ) )
    def test_batch_feature( self ) -> List[str]:
"""simple docstring"""
pass
    def test_call_pil( self ) -> Tuple:
"""simple docstring"""
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
snake_case_ : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : List[str] = image_processing(_A , return_tensors='pt' ).pixel_values
snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_numpy( self ) -> Any:
"""simple docstring"""
snake_case_ : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
snake_case_ ,snake_case_ : Tuple = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : Any = image_processing(_A , return_tensors='pt' ).pixel_values
snake_case_ ,snake_case_ : Any = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_pytorch( self ) -> List[str]:
"""simple docstring"""
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
snake_case_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : str = image_processing(_A , return_tensors='pt' ).pixel_values
snake_case_ ,snake_case_ : Tuple = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 327 | 0 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
snake_case = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class CopyCheckTester( unittest.TestCase ):
    def setUp( self ):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir , "models/bert/" ) )
        check_copies.TRANSFORMER_PATH = self.transformer_dir
shutil.copy(
os.path.join(__SCREAMING_SNAKE_CASE , "src/transformers/models/bert/modeling_bert.py" ) , os.path.join(self.transformer_dir , "models/bert/modeling_bert.py" ) , )
    def tearDown( self ):
        check_copies.TRANSFORMER_PATH = "src/transformers"
shutil.rmtree(self.transformer_dir )
    def check_copy_consistency( self , comment , class_name , class_code , overwrite_result=None ):
SCREAMING_SNAKE_CASE : Union[str, Any] = comment + f'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
SCREAMING_SNAKE_CASE : List[Any] = comment + f'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
        SCREAMING_SNAKE_CASE : Optional[int] = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 )
SCREAMING_SNAKE_CASE : Dict = black.format_str(__SCREAMING_SNAKE_CASE , mode=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.transformer_dir , "new_code.py" )
with open(__SCREAMING_SNAKE_CASE , "w" , newline="\n" ) as f:
f.write(__SCREAMING_SNAKE_CASE )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__SCREAMING_SNAKE_CASE ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=__SCREAMING_SNAKE_CASE )
with open(__SCREAMING_SNAKE_CASE , "r" ) as f:
self.assertTrue(f.read() , __SCREAMING_SNAKE_CASE )
    def test_find_code_in_transformers( self ):
SCREAMING_SNAKE_CASE : Any = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead" )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    def test_is_copy_consistent( self ):
# Base copy consistency
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , __SCREAMING_SNAKE_CASE , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , re.sub("Bert" , "TestModel" , __SCREAMING_SNAKE_CASE ) , )
# Copy consistency with a really long name
SCREAMING_SNAKE_CASE : List[str] = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
f'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , f'''{long_class_name}LMPredictionHead''' , re.sub("Bert" , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , __SCREAMING_SNAKE_CASE , overwrite_result=re.sub("Bert" , "TestModel" , __SCREAMING_SNAKE_CASE ) , )
    def test_convert_to_localized_md( self ):
SCREAMING_SNAKE_CASE : List[str] = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
SCREAMING_SNAKE_CASE : Optional[Any] = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
SCREAMING_SNAKE_CASE : List[Any] = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
SCREAMING_SNAKE_CASE : int = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning 发布。\n"
)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = check_copies.convert_to_localized_md(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme["format_model_list"] )
self.assertFalse(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = check_copies.convert_to_localized_md(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme["format_model_list"] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Dict = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
SCREAMING_SNAKE_CASE : Any = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
SCREAMING_SNAKE_CASE : List[str] = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = check_copies.convert_to_localized_md(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme["format_model_list"] )
# Check if the model link is synchronized.
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
| 363 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["""small""", """medium""", """large"""]
OLD_KEY = """lm_head.decoder.weight"""
NEW_KEY = """lm_head.weight"""
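# DialoGPT checkpoints store the LM head weight under OLD_KEY; transformers expects NEW_KEY instead.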
def convert_dialogpt_checkpoint( checkpoint_path , pytorch_dump_folder_path ):
    """simple docstring"""
    d = torch.load(checkpoint_path )
    d[NEW_KEY] = d.pop(OLD_KEY )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    torch.save(d , os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME ) )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, F"""{MODEL}_ft.pkl""")
        pytorch_dump_folder_path = F"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 319 | 0 |
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 5_0003
PYTHON_CODE = 5_0002
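# Token ids of the language-code tokens in the PLBart "base" vocab ("__en_XX__" and "__python__";
# checked against fairseq_tokens_to_ids in the integration tests below).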
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest( TokenizerTesterMixin, unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    def setUp(self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__snake_case : List[Any] = PLBartTokenizer(a_ , language_codes='''base''' , keep_accents=a_ )
tokenizer.save_pretrained(self.tmpdirname )
    def test_full_base_tokenizer(self ):
'''simple docstring'''
__snake_case : Optional[Any] = PLBartTokenizer(a_ , language_codes='''base''' , keep_accents=a_ )
__snake_case : List[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(a_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__snake_case : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
a_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__snake_case : Dict = tokenizer.convert_tokens_to_ids(a_ )
self.assertListEqual(
a_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__snake_case : List[str] = tokenizer.convert_ids_to_tokens(a_ )
self.assertListEqual(
a_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
__snake_case : List[Any] = tokenizer.vocab_size
__snake_case : List[Any] = [tokenizer.convert_ids_to_tokens(a_ ) for x in range(end - 4 , a_ )]
self.assertListEqual(a_ , ['''__java__''', '''__python__''', '''__en_XX__''', '''<mask>'''] )
__snake_case : int = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
__snake_case : List[str] = tokenizer(a_ ).input_ids
self.assertEqual(
tokenizer.decode(a_ , skip_special_tokens=a_ , clean_up_tokenization_spaces=a_ ) , a_ , )
    def test_full_multi_tokenizer(self ):
'''simple docstring'''
__snake_case : List[str] = PLBartTokenizer(a_ , language_codes='''multi''' , keep_accents=a_ )
__snake_case : int = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(a_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__snake_case : str = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
a_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__snake_case : Union[str, Any] = tokenizer.convert_tokens_to_ids(a_ )
self.assertListEqual(
a_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__snake_case : Any = tokenizer.convert_ids_to_tokens(a_ )
self.assertListEqual(
a_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
__snake_case : List[Any] = tokenizer.vocab_size
__snake_case : List[Any] = [tokenizer.convert_ids_to_tokens(a_ ) for x in range(end - 7 , a_ )]
self.assertListEqual(
a_ , ['''__java__''', '''__python__''', '''__en_XX__''', '''__javascript__''', '''__php__''', '''__ruby__''', '''__go__'''] )
__snake_case : Dict = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
__snake_case : int = tokenizer(a_ ).input_ids
self.assertEqual(
tokenizer.decode(a_ , skip_special_tokens=a_ , clean_up_tokenization_spaces=a_ ) , a_ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest( unittest.TestCase ):
'''simple docstring'''
    checkpoint_name = 'uclanlp/plbart-python-en_XX'
    src_text = [
        'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
        'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
    ]
    tgt_text = [
        'Returns the maximum value of a b c.',
        'Sums the values of a b c.',
    ]
    expected_src_tokens = [
134,
5452,
33460,
33441,
33463,
33465,
33463,
33449,
988,
20,
33456,
19,
33456,
771,
39,
4258,
889,
3318,
33441,
33463,
33465,
33463,
33449,
2471,
2,
PYTHON_CODE,
]
@classmethod
    def setUpClass(cls ):
'''simple docstring'''
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes='''base''' , src_lang='''python''' , tgt_lang='''en_XX''' )
        cls.pad_token_id = 1
return cls
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__java__'''] , 5_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__python__'''] , 5_00_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__en_XX__'''] , 5_00_03 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.assertIn(a_ , self.tokenizer.all_special_ids )
__snake_case : Optional[int] = [EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2]
__snake_case : Tuple = self.tokenizer.decode(a_ , skip_special_tokens=a_ )
__snake_case : Optional[int] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a_ )
self.assertEqual(a_ , a_ )
self.assertNotIn(self.tokenizer.eos_token , a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = ['''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''' * 20]
self.assertIsInstance(src_text[0] , a_ )
__snake_case : int = 10
__snake_case : Optional[Any] = self.tokenizer(a_ , max_length=a_ , truncation=a_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , a_ )
self.assertEqual(len(a_ ) , a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''__java__'''] ) , [5_00_04, 5_00_01] )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = tempfile.mkdtemp()
__snake_case : str = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(a_ )
__snake_case : List[str] = PLBartTokenizer.from_pretrained(a_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , a_ )
@require_torch
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=a_ , return_tensors='''pt''' )
__snake_case : Optional[int] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , a_ )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=a_ , truncation=a_ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
__snake_case : List[str] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(a_ , a_ )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
__snake_case : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , a_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = self.tokenizer(self.src_text , padding=a_ , truncation=a_ , max_length=3 , return_tensors='''pt''' )
__snake_case : Tuple = self.tokenizer(
text_target=self.tgt_text , padding=a_ , truncation=a_ , max_length=10 , return_tensors='''pt''' )
__snake_case : Optional[int] = targets['''input_ids''']
__snake_case : Optional[Any] = shift_tokens_right(a_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[str] = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''java''' )
self.assertEqual(
nested_simplify(a_ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[1_50, 2_42, 2, 5_00_03]],
'''attention_mask''': [[1, 1, 1, 1]],
# java
'''forced_bos_token_id''': 5_00_01,
} , )
| 102 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated by
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(A_ )
class lowercase( PretrainedConfig ):
'''simple docstring'''
model_type = "rag"
is_composition = True
def __init__( self: Union[str, Any], vocab_size: int=None, is_encoder_decoder: Tuple=True, prefix: Optional[int]=None, bos_token_id: List[str]=None, pad_token_id: int=None, eos_token_id: Optional[Any]=None, decoder_start_token_id: List[str]=None, title_sep: Optional[Any]=" / ", doc_sep: Tuple=" // ", n_docs: List[Any]=5, max_combined_length: Dict=300, retrieval_vector_size: Tuple=768, retrieval_batch_size: Optional[Any]=8, dataset: int="wiki_dpr", dataset_split: Any="train", index_name: Optional[int]="compressed", index_path: Optional[int]=None, passages_path: List[Any]=None, use_dummy_dataset: Optional[Any]=False, reduce_loss: str=False, label_smoothing: Dict=0.0, do_deduplication: Union[str, Any]=True, exclude_bos_score: Union[str, Any]=False, do_marginalize: str=False, output_retrieved: List[str]=False, use_cache: Union[str, Any]=True, forced_eos_token_id: Any=None, **kwargs: List[Any], ):
'''simple docstring'''
super().__init__(
bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs, )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
question_encoder_config = kwargs.pop("""question_encoder""" )
question_encoder_model_type = question_encoder_config.pop("""model_type""" )
decoder_config = kwargs.pop("""generator""" )
decoder_model_type = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config )
self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config )
self.reduce_loss = reduce_loss
self.label_smoothing = label_smoothing
self.exclude_bos_score = exclude_bos_score
self.do_marginalize = do_marginalize
self.title_sep = title_sep
self.doc_sep = doc_sep
self.n_docs = n_docs
self.max_combined_length = max_combined_length
self.dataset = dataset
self.dataset_split = dataset_split
self.index_name = index_name
self.retrieval_vector_size = retrieval_vector_size
self.retrieval_batch_size = retrieval_batch_size
self.passages_path = passages_path
self.index_path = index_path
self.use_dummy_dataset = use_dummy_dataset
self.output_retrieved = output_retrieved
self.do_deduplication = do_deduplication
self.use_cache = use_cache
self.forced_eos_token_id = forced_eos_token_id
if self.forced_eos_token_id is None:
self.forced_eos_token_id = getattr(self.generator, """forced_eos_token_id""", None )
@classmethod
def from_question_encoder_generator_configs( cls: Any, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs: Optional[Any] ):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs )
def to_dict( self: Tuple ):
'''simple docstring'''
output = copy.deepcopy(self.__dict__ )
output["""question_encoder"""] = self.question_encoder.to_dict()
output["""generator"""] = self.generator.to_dict()
output["""model_type"""] = self.__class__.model_type
return output
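# Minimal usage sketch (demo addition, not from the original file): the config is
# composed from two sub-configs via the classmethod above. The sub-config classes
# used here are illustrative assumptions.
# from transformers import DPRConfig, BartConfig
# rag_config = lowercase.from_question_encoder_generator_configs(DPRConfig(), BartConfig(), n_docs=5)
# rag_config.to_dict()  # re-serializes both sub-configs via the to_dict override above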
| 64 | 0 |
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def UpperCAmelCase__ ( seed ) -> Optional[Any]:
random.seed(seed )
np.random.seed(seed )
torch.manual_seed(seed )
torch.cuda.manual_seed_all(seed )
# ^^ safe to call this function even if cuda is not available
class A__ :
def __init__( self , parameters , decay = 0.9999 , min_decay = 0.0 , update_after_step = 0 , use_ema_warmup = False , inv_gamma = 1.0 , power = 2 / 3 , model_cls = None , model_config = None , **kwargs , ) -> Any:
'''simple docstring'''
if isinstance(parameters , torch.nn.Module ):
deprecation_message = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage`""" , """1.0.0""" , deprecation_message , standard_warn=False , )
parameters = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
use_ema_warmup = True
if kwargs.get("""max_value""" , None ) is not None:
deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("""max_value""" , """1.0.0""" , deprecation_message , standard_warn=False )
decay = kwargs["max_value"]
if kwargs.get("""min_value""" , None ) is not None:
deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("""min_value""" , """1.0.0""" , deprecation_message , standard_warn=False )
min_decay = kwargs["min_value"]
parameters = list(parameters )
self.shadow_params = [p.clone().detach() for p in parameters]
if kwargs.get("""device""" , None ) is not None:
deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
deprecate("""device""" , """1.0.0""" , deprecation_message , standard_warn=False )
self.to(device=kwargs["""device"""] )
self.temp_stored_params = None
self.decay = decay
self.min_decay = min_decay
self.update_after_step = update_after_step
self.use_ema_warmup = use_ema_warmup
self.inv_gamma = inv_gamma
self.power = power
self.optimization_step = 0
self.cur_decay_value = None # set in `step()`
self.model_cls = model_cls
self.model_config = model_config
@classmethod
def from_pretrained( cls , path , model_cls ) -> Optional[int]:
'''simple docstring'''
_, ema_kwargs = model_cls.load_config(path , return_unused_kwargs=True )
model = model_cls.from_pretrained(path )
ema_model = cls(model.parameters() , model_cls=model_cls , model_config=model.config )
ema_model.load_state_dict(ema_kwargs )
return ema_model
def save_pretrained( self , path ) -> Optional[Any]:
'''simple docstring'''
if self.model_cls is None:
raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""" )
if self.model_config is None:
raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""" )
model = self.model_cls.from_config(self.model_config )
state_dict = self.state_dict()
state_dict.pop("""shadow_params""" , None )
model.register_to_config(**state_dict )
self.copy_to(model.parameters() )
model.save_pretrained(path )
def get_decay( self , optimization_step ) -> Optional[int]:
'''simple docstring'''
step = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
cur_decay_value = (1 + step) / (10 + step)
cur_decay_value = min(cur_decay_value , self.decay )
# make sure decay is not smaller than min_decay
cur_decay_value = max(cur_decay_value , self.min_decay )
return cur_decay_value
@torch.no_grad()
def step( self , parameters ) -> List[Any]:
'''simple docstring'''
if isinstance(parameters , torch.nn.Module ):
deprecation_message = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""" , """1.0.0""" , deprecation_message , standard_warn=False , )
parameters = parameters.parameters()
parameters = list(parameters )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
decay = self.get_decay(self.optimization_step )
self.cur_decay_value = decay
one_minus_decay = 1 - decay
context_manager = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , parameters ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
context_manager = deepspeed.zero.GatheredParameters(param , modifier_rank=None )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(param )
def copy_to( self , parameters ) -> str:
'''simple docstring'''
parameters = list(parameters )
for s_param, param in zip(self.shadow_params , parameters ):
param.data.copy_(s_param.to(param.device ).data )
def to( self , device=None , dtype=None ) -> int:
'''simple docstring'''
self.shadow_params = [
p.to(device=device , dtype=dtype ) if p.is_floating_point() else p.to(device=device )
for p in self.shadow_params
]
def state_dict( self ) -> Optional[Any]:
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def store( self , parameters ) -> Dict:
'''simple docstring'''
self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]
def restore( self , parameters ) -> Dict:
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights """ """to `restore()`""" )
for c_param, param in zip(self.temp_stored_params , parameters ):
param.data.copy_(c_param.data )
# Better memory-wise.
self.temp_stored_params = None
def load_state_dict( self , state_dict ) -> Union[str, Any]:
'''simple docstring'''
state_dict = copy.deepcopy(state_dict )
self.decay = state_dict.get("""decay""" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("""Decay must be between 0 and 1""" )
self.min_decay = state_dict.get("""min_decay""" , self.min_decay )
if not isinstance(self.min_decay , float ):
raise ValueError("""Invalid min_decay""" )
self.optimization_step = state_dict.get("""optimization_step""" , self.optimization_step )
if not isinstance(self.optimization_step , int ):
raise ValueError("""Invalid optimization_step""" )
self.update_after_step = state_dict.get("""update_after_step""" , self.update_after_step )
if not isinstance(self.update_after_step , int ):
raise ValueError("""Invalid update_after_step""" )
self.use_ema_warmup = state_dict.get("""use_ema_warmup""" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , bool ):
raise ValueError("""Invalid use_ema_warmup""" )
self.inv_gamma = state_dict.get("""inv_gamma""" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("""Invalid inv_gamma""" )
self.power = state_dict.get("""power""" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("""Invalid power""" )
shadow_params = state_dict.get("""shadow_params""" , None )
if shadow_params is not None:
self.shadow_params = shadow_params
if not isinstance(self.shadow_params , list ):
raise ValueError("""shadow_params must be a list""" )
if not all(isinstance(p , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("""shadow_params must all be Tensors""" )
| 370 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImgaImgPipelineFastTests ( unittest.TestCase ):
def tearDown( self ) -> List[Any]:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def dummy_image( self ) -> List[str]:
'''simple docstring'''
batch_size = 1
num_channels = 3
sizes = (32, 32)
image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
return image
@property
def dummy_cond_unet( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
model = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def dummy_vae( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
model = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def dummy_text_encoder( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
config = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(config )
@property
def dummy_extractor( self ) -> int:
'''simple docstring'''
def extract(*args , **kwargs ):
class Out:
def __init__( self ) -> Dict:
'''simple docstring'''
self.pixel_values = torch.ones([0] )
def to( self , device ) -> str:
'''simple docstring'''
self.pixel_values.to(device )
return self
return Out()
return extract
def test_stable_diffusion_img2img_default_case( self ) -> List[Any]:
'''simple docstring'''
device = """cpu""" # ensure determinism for the device-dependent torch.Generator
unet = self.dummy_cond_unet
scheduler = PNDMScheduler(skip_prk_steps=True )
vae = self.dummy_vae
bert = self.dummy_text_encoder
tokenizer = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
tokenizer.model_max_length = 77
init_image = self.dummy_image.to(device )
init_image = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
alt_pipe = AltDiffusionImgaImgPipeline(
unet=unet , scheduler=scheduler , vae=vae , text_encoder=bert , tokenizer=tokenizer , safety_checker=None , feature_extractor=self.dummy_extractor , )
alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=False )
alt_pipe = alt_pipe.to(device )
alt_pipe.set_progress_bar_config(disable=None )
prompt = """A painting of a squirrel eating a burger"""
generator = torch.Generator(device=device ).manual_seed(0 )
output = alt_pipe(
[prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=init_image , )
image = output.images
generator = torch.Generator(device=device ).manual_seed(0 )
image_from_tuple = alt_pipe(
[prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=init_image , return_dict=False , )[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def test_stable_diffusion_img2img_fp16( self ) -> Union[str, Any]:
'''simple docstring'''
unet = self.dummy_cond_unet
scheduler = PNDMScheduler(skip_prk_steps=True )
vae = self.dummy_vae
bert = self.dummy_text_encoder
tokenizer = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
tokenizer.model_max_length = 77
init_image = self.dummy_image.to(torch_device )
# put models in fp16
unet = unet.half()
vae = vae.half()
bert = bert.half()
# make sure here that pndm scheduler skips prk
alt_pipe = AltDiffusionImgaImgPipeline(
unet=unet , scheduler=scheduler , vae=vae , text_encoder=bert , tokenizer=tokenizer , safety_checker=None , feature_extractor=self.dummy_extractor , )
alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=False )
alt_pipe = alt_pipe.to(torch_device )
alt_pipe.set_progress_bar_config(disable=None )
prompt = """A painting of a squirrel eating a burger"""
generator = torch.manual_seed(0 )
image = alt_pipe(
[prompt] , generator=generator , num_inference_steps=2 , output_type="""np""" , image=init_image , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def test_stable_diffusion_img2img_pipeline_multiple_of_8( self ) -> Any:
'''simple docstring'''
init_image = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
# resize to resolution that is divisible by 8 but not 16 or 32
init_image = init_image.resize((760, 504) )
model_id = """BAAI/AltDiffusion"""
pipe = AltDiffusionImgaImgPipeline.from_pretrained(
model_id , safety_checker=None , )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
prompt = """A fantasy landscape, trending on artstation"""
generator = torch.manual_seed(0 )
output = pipe(
prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , generator=generator , output_type="""np""" , )
image = output.images[0]
image_slice = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImgaImgPipelineIntegrationTests ( unittest.TestCase ):
def tearDown( self ) -> List[str]:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_stable_diffusion_img2img_pipeline_default( self ) -> List[Any]:
'''simple docstring'''
init_image = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
init_image = init_image.resize((768, 512) )
expected_image = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
model_id = """BAAI/AltDiffusion"""
pipe = AltDiffusionImgaImgPipeline.from_pretrained(
model_id , safety_checker=None , )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
prompt = """A fantasy landscape, trending on artstation"""
generator = torch.manual_seed(0 )
output = pipe(
prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , generator=generator , output_type="""np""" , )
image = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 101 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__A : List[str] = {"tokenization_bertweet": ["BertweetTokenizer"]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 273 |
def hexagonal_numbers( length: int ) -> list[int]:
'''simple docstring'''
if length <= 0 or not isinstance(length , int ):
raise ValueError('''Length must be a positive integer.''' )
return [n * (2 * n - 1) for n in range(length )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
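# Sanity sketch (demo addition): h(n) = n * (2n - 1), so consecutive hexagonal
# numbers differ by the values 4n + 1 (1, 5, 9, 13, ...).
assert hexagonal_numbers(length=5) == [0, 1, 6, 15, 28]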
| 273 | 1 |
"""simple docstring"""
import random
def partition( a: list , left_index: int , right_index: int ) -> int:
'''simple docstring'''
pivot = a[left_index]
i = left_index + 1
for j in range(left_index + 1 , right_index ):
if a[j] < pivot:
a[i], a[j] = a[j], a[i]
i += 1
a[i - 1], a[left_index] = a[left_index], a[i - 1]
return i - 1
def quick_sort_random( a: list , left: int , right: int ) -> None:
'''simple docstring'''
if left < right:
pivot = random.randint(left , right - 1 )
a[left], a[pivot] = (
a[pivot],
a[left],
) # switches the pivot with the left most bound
pivot_index = partition(a , left , right )
quick_sort_random(
a , left , pivot_index ) # recursive quicksort to the left of the pivot point
quick_sort_random(
a , pivot_index + 1 , right ) # recursive quicksort to the right of the pivot point
def main( ) -> None:
'''simple docstring'''
user_input = input("""Enter numbers separated by a comma:\n""" ).strip()
arr = [int(item ) for item in user_input.split(""",""" )]
quick_sort_random(arr , 0 , len(arr ) )
print(arr )
if __name__ == "__main__":
main()
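# Non-interactive sketch (demo addition): quick_sort_random sorts in place over
# the half-open range [left, right), so it can be exercised without the
# stdin-driven main() above.
_demo = [9, 1, 7, 3, 5]
quick_sort_random(_demo, 0, len(_demo))
assert _demo == [1, 3, 5, 7, 9]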
| 32 | """simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def test_send_file_running_as_expected( file: Tuple , sock: List[str] ) -> Union[str, Any]:
'''simple docstring'''
conn = Mock()
sock.return_value.accept.return_value = conn, Mock()
f = iter([1, None] )
file.return_value.__enter__.return_value.read.side_effect = lambda _ : next(f )
# ===== invoke =====
send_file(filename="""mytext.txt""" , testing=True )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 32 | 1 |
'''simple docstring'''
import operator
def strand_sort( arr: list , reverse: bool = False , solution: list | None = None ) -> list:
'''simple docstring'''
_operator = operator.lt if reverse else operator.gt
solution = solution or []
if not arr:
return solution
sublist = [arr.pop(0 )]
for i, item in enumerate(arr ):
if _operator(item , sublist[-1] ):
sublist.append(item )
arr.pop(i )
# merging sublist into solution list
if not solution:
solution.extend(sublist )
else:
while sublist:
ind = sublist.pop(0 )
for i, xx in enumerate(solution ):
if not _operator(ind , xx ):
solution.insert(i , ind )
break
else:
solution.append(ind )
strand_sort(arr , reverse , solution )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
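# Worked trace (demo addition): on [4, 3, 5, 1, 2] the first pass peels off the
# increasing strand [4, 5] (leaving [3, 1, 2]); each strand is then merged into
# the running solution until the input is exhausted.
assert strand_sort([10, 2, 8, 4]) == [2, 4, 8, 10]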
| 318 |
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase ( TokenizerTesterMixin , unittest.TestCase ):
tokenizer_class = LayoutLMTokenizer
rust_tokenizer_class = LayoutLMTokenizerFast
test_rust_tokenizer = True
space_between_special_tokens = True
def setUp(self ):
super().setUp()
vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def get_tokenizer(self , **A ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **A )
def get_input_output_texts(self , A ):
input_text = '''UNwant\u00E9d,running'''
output_text = '''unwanted, running'''
return input_text, output_text
def test_full_tokenizer(self ):
tokenizer = self.tokenizer_class(self.vocab_file )
tokens = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(tokens , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 1_0, 8, 9] )
def UpperCAmelCase__ (self ):
pass
| 318 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '\\n\n'
_DESCRIPTION = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    model_id (str): model used for calculating Perplexity\n        NOTE: Perplexity can only be calculated for causal language models.\n        This includes models such as gpt2, causal variations of bert,\n        causal versions of t5, and more (the full list can be found\n        in the AutoModelForCausalLM documentation here:\n        https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n    in the input list, as well as the mean perplexity. If one of the input texts is\n    longer than the max input length of the model, then it is truncated to the\n    max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        78.22\n        >>> print(round(results["perplexities"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = datasets.load_dataset("wikitext",\n        ...                                     "wikitext-2-raw-v1",\n        ...                                     split="test")["text"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!=\'\']\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        60.35\n        >>> print(round(results["perplexities"][0], 2))\n        81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE (datasets.Metric ):
"""simple docstring"""
def _info( self : Dict ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"input_texts": datasets.Value("string" ),
} ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , )
def _compute( self : List[str] , input_texts : Optional[int] , model_id : Optional[int] , batch_size : int = 16 , add_start_token : bool = True , device : Dict=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
device = "cuda"
else:
device = "cuda" if torch.cuda.is_available() else "cpu"
model = AutoModelForCausalLM.from_pretrained(model_id )
model = model.to(device )
tokenizer = AutoTokenizer.from_pretrained(model_id )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
existing_special_tokens = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(existing_special_tokens ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
max_tokenized_len = model.config.max_length - 1
else:
max_tokenized_len = model.config.max_length
encodings = tokenizer(
input_texts , add_special_tokens=False , padding=True , truncation=True , max_length=max_tokenized_len , return_tensors="pt" , return_attention_mask=True , ).to(device )
encoded_texts = encodings["input_ids"]
attn_masks = encodings["attention_mask"]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
ppls = []
loss_fct = CrossEntropyLoss(reduction="none" )
for start_index in logging.tqdm(range(0 , len(encoded_texts ) , batch_size ) ):
end_index = min(start_index + batch_size , len(encoded_texts ) )
encoded_batch = encoded_texts[start_index:end_index]
attn_mask = attn_masks[start_index:end_index]
if add_start_token:
bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(device )
encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
attn_mask = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(device ), attn_mask] , dim=1 )
labels = encoded_batch
with torch.no_grad():
out_logits = model(encoded_batch , attention_mask=attn_mask ).logits
shift_logits = out_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
perplexity_batch = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , shift_labels ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(ppls )}
| 346 |
'''simple docstring'''
def solution( n : int = 6008_5147_5143 ) -> int:
try:
n = int(n )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one." )
i = 2
ans = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
ans = i
while n % i == 0:
n = n // i
i += 1
return int(ans )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 346 | 1 |
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 267 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests ( unittest.TestCase ):
"""simple docstring"""
@property
def dummy_uncond_unet( self : List[str] ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
model = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def test_inference( self : List[Any] ) -> List[str]:
"""simple docstring"""
unet = self.dummy_uncond_unet
scheduler = PNDMScheduler()
pndm = PNDMPipeline(unet=unet , scheduler=scheduler )
pndm.to(torch_device )
pndm.set_progress_bar_config(disable=None )
generator = torch.manual_seed(0 )
image = pndm(generator=generator , num_inference_steps=20 , output_type="""numpy""" ).images
generator = torch.manual_seed(0 )
image_from_tuple = pndm(generator=generator , num_inference_steps=20 , output_type="""numpy""" , return_dict=False )[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class PNDMPipelineIntegrationTests ( unittest.TestCase ):
"""simple docstring"""
def test_inference_cifar10( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """google/ddpm-cifar10-32"""
__SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = PNDMScheduler()
__SCREAMING_SNAKE_CASE = PNDMPipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
pndm.to(__SCREAMING_SNAKE_CASE )
pndm.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = pndm(generator=__SCREAMING_SNAKE_CASE , output_type="""numpy""" ).images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 267 | 1 |
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests ( unittest.TestCase ):
@property
def dummy_uncond_unet(self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
model = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def test_inference(self ) -> Dict:
'''simple docstring'''
unet = self.dummy_uncond_unet
scheduler = PNDMScheduler()
pndm = PNDMPipeline(unet=unet , scheduler=scheduler )
pndm.to(torch_device )
pndm.set_progress_bar_config(disable=None )
generator = torch.manual_seed(0 )
image = pndm(generator=generator , num_inference_steps=20 , output_type='''numpy''' ).images
generator = torch.manual_seed(0 )
image_from_tuple = pndm(generator=generator , num_inference_steps=20 , output_type='''numpy''' , return_dict=False )[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class PNDMPipelineIntegrationTests ( unittest.TestCase ):
def test_inference_cifar10(self ) -> Any:
'''simple docstring'''
model_id = '''google/ddpm-cifar10-32'''
unet = UNetaDModel.from_pretrained(model_id )
scheduler = PNDMScheduler()
pndm = PNDMPipeline(unet=unet , scheduler=scheduler )
pndm.to(torch_device )
pndm.set_progress_bar_config(disable=None )
generator = torch.manual_seed(0 )
image = pndm(generator=generator , output_type='''numpy''' ).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([0.1_564, 0.14_645, 0.1_406, 0.14_715, 0.12_425, 0.14_045, 0.13_115, 0.12_175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 358 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor ( ProcessorMixin ):
attributes = ['''image_processor''', '''tokenizer''']
image_processor_class = '''BlipImageProcessor'''
tokenizer_class = '''AutoTokenizer'''
def __init__(self , image_processor , tokenizer , qformer_tokenizer ) -> Dict:
'''simple docstring'''
super().__init__(image_processor , tokenizer )
# add QFormer tokenizer
self.qformer_tokenizer = qformer_tokenizer
def __call__(self , images = None , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_token_type_ids = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ) -> BatchFeature:
'''simple docstring'''
if images is None and text is None:
raise ValueError('''You have to specify at least images or text.''' )
encoding = BatchFeature()
if text is not None:
text_encoding = self.tokenizer(
text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
encoding.update(text_encoding )
qformer_text_encoding = self.qformer_tokenizer(
text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
encoding["""qformer_input_ids"""] = qformer_text_encoding.pop('''input_ids''' )
encoding["""qformer_attention_mask"""] = qformer_text_encoding.pop('''attention_mask''' )
if images is not None:
image_encoding = self.image_processor(images , return_tensors=return_tensors )
encoding.update(image_encoding )
return encoding
def batch_decode(self , *args , **kwargs ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*args , **kwargs )
def decode(self , *args , **kwargs ) -> Union[str, Any]:
'''simple docstring'''
return self.tokenizer.decode(*args , **kwargs )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def model_input_names(self ) -> Any:
'''simple docstring'''
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def save_pretrained(self , save_directory , **kwargs ) -> List[Any]:
'''simple docstring'''
if os.path.isfile(save_directory ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(save_directory , exist_ok=True )
qformer_tokenizer_path = os.path.join(save_directory , '''qformer_tokenizer''' )
self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path )
return super().save_pretrained(save_directory , **kwargs )
@classmethod
def from_pretrained(cls , pretrained_model_name_or_path , **kwargs ) -> Union[str, Any]:
'''simple docstring'''
qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path , subfolder='''qformer_tokenizer''' )
args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path , **kwargs )
args.append(qformer_tokenizer )
return cls(*args )
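# Usage sketch (demo addition; assumes the upstream InstructBLIP processor API
# and an illustrative checkpoint name):
# processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
# inputs = processor(images=image, text="What is shown here?", return_tensors="pt")
# # -> pixel_values, input_ids/attention_mask, and qformer_input_ids/qformer_attention_mask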
| 279 | 0 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__A = TypeVar("KEY")
__A = TypeVar("VAL")
@dataclass(frozen=SCREAMING_SNAKE_CASE__ , slots=SCREAMING_SNAKE_CASE__ )
class __lowerCAmelCase ( Generic[KEY, VAL] ):
"""simple docstring"""
snake_case_ = 42
snake_case_ = 42
class __lowerCAmelCase ( _Item ):
"""simple docstring"""
def __init__( self ) -> None:
'''simple docstring'''
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
def __bool__( self ) -> bool:
'''simple docstring'''
return False
__A = _DeletedItem()
class HashMap ( MutableMapping[KEY, VAL] ):
"""simple docstring"""
def __init__( self , initial_block_size = 8 , capacity_factor = 0.75 ) -> None:
'''simple docstring'''
self._initial_block_size = initial_block_size
self._buckets = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
self._capacity_factor = capacity_factor
self._len = 0
def _get_bucket_index( self , key ) -> int:
'''simple docstring'''
return hash(key ) % len(self._buckets )
def _get_next_ind( self , ind ) -> int:
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def _try_set( self , ind , key , val ) -> bool:
'''simple docstring'''
stored = self._buckets[ind]
if not stored:
self._buckets[ind] = _Item(key , val )
self._len += 1
return True
elif stored.key == key:
self._buckets[ind] = _Item(key , val )
return True
else:
return False
def _is_full( self ) -> bool:
'''simple docstring'''
limit = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(limit )
def _is_sparse( self ) -> bool:
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
limit = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _resize( self , new_size ) -> None:
'''simple docstring'''
old_buckets = self._buckets
self._buckets = [None] * new_size
self._len = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def _size_up( self ) -> None:
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def _size_down( self ) -> None:
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def _iterate_buckets( self , key ) -> Iterator[int]:
'''simple docstring'''
ind = self._get_bucket_index(key )
for _ in range(len(self._buckets ) ):
yield ind
ind = self._get_next_ind(ind )
def _add_item( self , key , val ) -> None:
'''simple docstring'''
for ind in self._iterate_buckets(key ):
if self._try_set(ind , key , val ):
break
def __setitem__( self , key , val ) -> None:
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(key , val )
def __delitem__( self , key ) -> None:
'''simple docstring'''
for ind in self._iterate_buckets(key ):
item = self._buckets[ind]
if item is None:
raise KeyError(key )
if item is _deleted:
continue
if item.key == key:
self._buckets[ind] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , key ) -> VAL:
'''simple docstring'''
for ind in self._iterate_buckets(key ):
item = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(key )
def __len__( self ) -> int:
'''simple docstring'''
return self._len
def __iter__( self ) -> Iterator[KEY]:
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__( self ) -> str:
'''simple docstring'''
val_string = ", ".join(
f"""{item.key}: {item.val}""" for item in self._buckets if item )
return f"""HashMap({val_string})"""
| 90 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class a__ ( unittest.TestCase ):
@slow
def test_small_integration_test( self : List[Any] ) -> List[Any]:
model = TFAutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' )
tokenizer = AutoTokenizer.from_pretrained('google/mt5-small' )
input_ids = tokenizer('Hello there', return_tensors='tf' ).input_ids
labels = tokenizer('Hi I am', return_tensors='tf' ).input_ids
loss = model(input_ids, labels=labels ).loss
mtf_score = -tf.math.reduce_mean(loss ).numpy()
EXPECTED_SCORE = -21.22_8168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
| 255 | 0 |
import baseaa
def baseaa_encode (string : str ):
return baseaa.baaencode(string.encode("utf-8" ) )
def baseaa_decode (encoded : bytes ):
return baseaa.baadecode(encoded ).decode("utf-8" )
if __name__ == "__main__":
lowercase : List[Any] = "Hello World!"
lowercase : Union[str, Any] = baseaa_encode(test)
print(encoded)
lowercase : Optional[Any] = baseaa_decode(encoded)
print(decoded) | 367 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase : Union[str, Any] = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : str = ["GLPNFeatureExtractor"]
lowercase : Tuple = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_glpn"] = [
"GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
"GLPNForDepthEstimation",
"GLPNLayer",
"GLPNModel",
"GLPNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 171 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
pipeline_class = KandinskyInpaintPipeline
params = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
batch_params = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
required_optional_params = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
test_xformers_attention = False
@property
def text_embedder_hidden_size( self ) -> str:
'''simple docstring'''
return 32
@property
def time_input_dim( self ) -> Any:
'''simple docstring'''
return 32
@property
def block_out_channels_a( self ) -> Optional[int]:
'''simple docstring'''
return self.time_input_dim
@property
def time_embed_dim( self ) -> Dict:
'''simple docstring'''
return self.time_input_dim * 4
@property
def cross_attention_dim( self ) -> Tuple:
'''simple docstring'''
return 100
@property
def dummy_tokenizer( self ) -> Dict:
'''simple docstring'''
tokenizer = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def dummy_text_encoder( self ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
config = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
text_encoder = MultilingualCLIP(config )
text_encoder = text_encoder.eval()
return text_encoder
@property
def dummy_unet( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
model_kwargs = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
model = UNetaDConditionModel(**model_kwargs )
return model
@property
def dummy_movq_kwargs( self ) -> Dict:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def dummy_movq( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
model = VQModel(**self.dummy_movq_kwargs )
return model
def get_dummy_components( self ) -> Optional[Any]:
'''simple docstring'''
text_encoder = self.dummy_text_encoder
tokenizer = self.dummy_tokenizer
unet = self.dummy_unet
movq = self.dummy_movq
scheduler = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type="""epsilon""" , thresholding=False , )
components = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ) -> Dict:
'''simple docstring'''
UpperCAmelCase : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_SCREAMING_SNAKE_CASE )
# create init_image
UpperCAmelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase : str = Image.fromarray(np.uint8(_SCREAMING_SNAKE_CASE ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
UpperCAmelCase : Optional[int] = np.ones((64, 64) , dtype=np.float32 )
UpperCAmelCase : str = 0
if str(_SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
UpperCAmelCase : Dict = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase : str = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : str = """cpu"""
UpperCAmelCase : str = self.get_dummy_components()
UpperCAmelCase : Union[str, Any] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase : str = output.images
UpperCAmelCase : List[Any] = pipe(
**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) , return_dict=_SCREAMING_SNAKE_CASE , )[0]
UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase : Tuple = image_from_tuple[0, -3:, -3:, -1]
print(F"image.shape {image.shape}" )
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : int = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
UpperCAmelCase : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
UpperCAmelCase : int = np.ones((768, 768) , dtype=np.float32 )
UpperCAmelCase : Any = 0
UpperCAmelCase : List[str] = """a hat"""
UpperCAmelCase : Optional[int] = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
UpperCAmelCase : List[Any] = pipeline.to(_SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCAmelCase , UpperCAmelCase : Tuple = pipe_prior(
_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
UpperCAmelCase : int = pipeline(
_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , mask_image=_SCREAMING_SNAKE_CASE , image_embeds=_SCREAMING_SNAKE_CASE , negative_image_embeds=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
UpperCAmelCase : Optional[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
| 109 |
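# Hedged sketch of the regression-test pattern used in the pipeline tests
# above: run the pipeline, take a fixed 3x3 corner slice of the image's last
# channel, and compare it to hard-coded reference values within a loose
# tolerance. `check_corner_slice` is an illustrative helper, not part of the
# test suite.
import numpy as np
def check_corner_slice(image: np.ndarray, expected_slice: np.ndarray, atol: float = 1e-2) -> None:
    image_slice = image[0, -3:, -3:, -1]
    assert np.abs(image_slice.flatten() - expected_slice).max() < atol, (
        f"expected_slice {expected_slice}, but got {image_slice.flatten()}")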
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = """vivit"""
def __init__( self : List[str] , _UpperCAmelCase : List[Any]=2_24 , _UpperCAmelCase : List[str]=32 , _UpperCAmelCase : Any=[2, 16, 16] , _UpperCAmelCase : int=3 , _UpperCAmelCase : Optional[Any]=7_68 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : Dict=12 , _UpperCAmelCase : Optional[Any]=30_72 , _UpperCAmelCase : Optional[int]="gelu_fast" , _UpperCAmelCase : Union[str, Any]=0.0 , _UpperCAmelCase : Tuple=0.0 , _UpperCAmelCase : Optional[int]=0.02 , _UpperCAmelCase : List[Any]=1E-06 , _UpperCAmelCase : List[str]=True , **_UpperCAmelCase : List[Any] , ):
"""simple docstring"""
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = image_size
UpperCAmelCase__ = num_frames
UpperCAmelCase__ = tubelet_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = qkv_bias
super().__init__(**_UpperCAmelCase )
| 346 | 0 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
lowercase : str = logging.get_logger(__name__)
class lowerCamelCase__ ( __lowercase):
'''simple docstring'''
def __init__( self :Any , a :Union[List[ControlNetModel], Tuple[ControlNetModel]] ) -> List[Any]:
super().__init__()
__UpperCamelCase : Optional[Any] = nn.ModuleList(a )
def _lowerCamelCase ( self :Optional[int] , a :torch.FloatTensor , a :Union[torch.Tensor, float, int] , a :torch.Tensor , a :List[torch.tensor] , a :List[float] , a :Optional[torch.Tensor] = None , a :Optional[torch.Tensor] = None , a :Optional[torch.Tensor] = None , a :Optional[Dict[str, Any]] = None , a :bool = False , a :bool = True , ) -> Union[ControlNetOutput, Tuple]:
for i, (image, scale, controlnet) in enumerate(zip(a , a , self.nets ) ):
__UpperCamelCase , __UpperCamelCase : Optional[Any] = controlnet(
a , a , a , a , a , a , a , a , a , a , a , )
# merge samples
if i == 0:
__UpperCamelCase , __UpperCamelCase : Dict = down_samples, mid_sample
else:
__UpperCamelCase : str = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(a , a )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def _lowerCamelCase ( self :int , a :Union[str, os.PathLike] , a :bool = True , a :Callable = None , a :bool = False , a :Optional[str] = None , ) -> List[Any]:
__UpperCamelCase : Tuple = 0
__UpperCamelCase : Dict = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
a , is_main_process=a , save_function=a , safe_serialization=a , variant=a , )
idx += 1
__UpperCamelCase : str = model_path_to_save + f'_{idx}'
@classmethod
def _lowerCamelCase ( cls :List[Any] , a :Optional[Union[str, os.PathLike]] , **a :str ) -> Tuple:
__UpperCamelCase : Tuple = 0
__UpperCamelCase : int = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
__UpperCamelCase : Optional[Any] = pretrained_model_path
while os.path.isdir(a ):
__UpperCamelCase : Any = ControlNetModel.from_pretrained(a , **a )
controlnets.append(a )
idx += 1
__UpperCamelCase : Tuple = pretrained_model_path + f'_{idx}'
logger.info(f'{len(a )} controlnets loaded from {pretrained_model_path}.' )
if len(a ) == 0:
raise ValueError(
f'No ControlNets found under {os.path.dirname(a )}. Expected at least {pretrained_model_path + "_0"}.' )
return cls(a ) | 151 |
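# Hedged usage sketch for the multi-ControlNet wrapper above (it mirrors
# diffusers' MultiControlNetModel): several ControlNetModel instances are
# wrapped in one module, and their residuals are summed in forward. The
# checkpoint IDs below are illustrative public checkpoints; downloading them
# is assumed to succeed.
canny = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''')
pose = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-openpose''')
multi = lowerCamelCase__([canny, pose])  # the wrapper class defined above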
def multiply(a: int, b: int) -> int:
    '''simple docstring'''
    # Binary (Russian-peasant) multiplication: accumulate a*b by doubling.
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
def multiply_mod(a: int, b: int, c: int) -> int:
    '''simple docstring'''
    # Same doubling scheme, with every partial sum reduced modulo c.
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res | 151 | 1 |
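# Minimal usage sketch for the two helpers above (the names `multiply` and
# `multiply_mod` are descriptive stand-ins; values are illustrative). The
# modular variant keeps every intermediate sum reduced mod c, which is what
# makes the same scheme overflow-safe in fixed-width integer languages.
assert multiply(7, 13) == 7 * 13
assert multiply_mod(7, 13, 5) == (7 * 13) % 5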
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase_ = {
'''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''],
'''tokenization_ctrl''': ['''CTRLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CTRLForSequenceClassification''',
'''CTRLLMHeadModel''',
'''CTRLModel''',
'''CTRLPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCTRLForSequenceClassification''',
'''TFCTRLLMHeadModel''',
'''TFCTRLModel''',
'''TFCTRLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 8 |
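# Hedged illustration of the lazy-import pattern above: _LazyModule defers the
# heavy torch/tf imports until an attribute is first touched. A minimal
# stand-alone equivalent uses PEP 562 module-level __getattr__; the structure
# below is illustrative, not the transformers implementation.
import importlib
_import_structure = {'''tokenization_ctrl''': ['''CTRLTokenizer''']}
def __getattr__(name):
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f'''.{module_name}''', __name__)
            return getattr(module, name)
    raise AttributeError(f'''module {__name__!r} has no attribute {name!r}''')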
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    # Drop references to the given objects, then empty the accelerator cache.
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size(exception):
    # True only for the known out-of-memory error messages below.
    _statements = [
        '''CUDA out of memory.''', # CUDA OOM
        '''cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.''', # CUDNN SNAFU
        '''DefaultCPUAllocator: can\'t allocate memory''', # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
def find_executable_batch_size(function=None, starting_batch_size=128):
    # Decorator that retries `function` with a halved batch size on OOM.
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)
    batch_size = starting_batch_size
    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ''', '''.join([F'''{arg}={value}''' for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                F'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
                F''' Remove this as the decorator already does so: `{function.__name__}({arg_str})`''')
        while True:
            if batch_size == 0:
                raise RuntimeError('''No executable batch size found, reached zero.''')
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise
    return decorator | 8 | 1 |
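# Hedged usage sketch for find_executable_batch_size: the decorated function
# must accept the batch size as its first argument; on a recognised OOM error
# the decorator halves the batch size and retries. `train` below is
# hypothetical.
@find_executable_batch_size(starting_batch_size=64)
def train(batch_size, lr=1e-4):
    print(f"trying batch_size={batch_size} with lr={lr}")
    # ... build the dataloader and run a training step with `batch_size` ...
train()  # called WITHOUT batch_size; the decorator injects and adapts it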
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
def __magic_name__ ( self : str ) -> Dict:
'''simple docstring'''
A__ = "ZinengTang/tvlt-base"
A__ = tempfile.mkdtemp()
def __magic_name__ ( self : int , **snake_case_ : Dict ) -> Union[str, Any]:
'''simple docstring'''
return TvltImageProcessor.from_pretrained(self.checkpoint , **snake_case_ )
def __magic_name__ ( self : Optional[int] , **snake_case_ : str ) -> List[str]:
'''simple docstring'''
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **snake_case_ )
def __magic_name__ ( self : Optional[int] ) -> str:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __magic_name__ ( self : Tuple ) -> Any:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = self.get_feature_extractor()
A__ = TvltProcessor(image_processor=snake_case_ , feature_extractor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
A__ = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
self.assertIsInstance(processor.image_processor , snake_case_ )
def __magic_name__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = self.get_feature_extractor()
A__ = TvltProcessor(image_processor=snake_case_ , feature_extractor=snake_case_ )
A__ = np.ones([12_000] )
A__ = feature_extractor(snake_case_ , return_tensors="np" )
A__ = processor(audio=snake_case_ , return_tensors="np" )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __magic_name__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = self.get_feature_extractor()
A__ = TvltProcessor(image_processor=snake_case_ , feature_extractor=snake_case_ )
A__ = np.ones([3, 224, 224] )
A__ = image_processor(snake_case_ , return_tensors="np" )
A__ = processor(images=snake_case_ , return_tensors="np" )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __magic_name__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = self.get_feature_extractor()
A__ = TvltProcessor(image_processor=snake_case_ , feature_extractor=snake_case_ )
A__ = np.ones([12_000] )
A__ = np.ones([3, 224, 224] )
A__ = processor(audio=snake_case_ , images=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] )
# test if it raises when no input is passed
with pytest.raises(snake_case_ ):
processor()
def __magic_name__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = self.get_feature_extractor()
A__ = TvltProcessor(image_processor=snake_case_ , feature_extractor=snake_case_ )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="`processor` and `image_processor`+`feature_extractor` model input names do not match" , )
| 362 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
def __magic_name__ ( self : str ) -> Dict:
'''simple docstring'''
A__ = "ZinengTang/tvlt-base"
A__ = tempfile.mkdtemp()
def __magic_name__ ( self : int , **snake_case_ : Dict ) -> Union[str, Any]:
'''simple docstring'''
return TvltImageProcessor.from_pretrained(self.checkpoint , **snake_case_ )
def __magic_name__ ( self : Optional[int] , **snake_case_ : str ) -> List[str]:
'''simple docstring'''
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **snake_case_ )
def __magic_name__ ( self : Optional[int] ) -> str:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __magic_name__ ( self : Tuple ) -> Any:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = self.get_feature_extractor()
A__ = TvltProcessor(image_processor=snake_case_ , feature_extractor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
A__ = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
self.assertIsInstance(processor.image_processor , snake_case_ )
def __magic_name__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = self.get_feature_extractor()
A__ = TvltProcessor(image_processor=snake_case_ , feature_extractor=snake_case_ )
A__ = np.ones([12_000] )
A__ = feature_extractor(snake_case_ , return_tensors="np" )
A__ = processor(audio=snake_case_ , return_tensors="np" )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __magic_name__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = self.get_feature_extractor()
A__ = TvltProcessor(image_processor=snake_case_ , feature_extractor=snake_case_ )
A__ = np.ones([3, 224, 224] )
A__ = image_processor(snake_case_ , return_tensors="np" )
A__ = processor(images=snake_case_ , return_tensors="np" )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __magic_name__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = self.get_feature_extractor()
A__ = TvltProcessor(image_processor=snake_case_ , feature_extractor=snake_case_ )
A__ = np.ones([12_000] )
A__ = np.ones([3, 224, 224] )
A__ = processor(audio=snake_case_ , images=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] )
# test if it raises when no input is passed
with pytest.raises(snake_case_ ):
processor()
def __magic_name__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = self.get_feature_extractor()
A__ = TvltProcessor(image_processor=snake_case_ , feature_extractor=snake_case_ )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="`processor` and `image_processor`+`feature_extractor` model input names do not match" , )
| 230 | 0 |
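# Hedged usage sketch for the processor pattern exercised above: TvltProcessor
# simply dispatches `images` to its image processor and `audio` to its feature
# extractor. Default construction of the two components is an assumption here.
import numpy as np
processor = TvltProcessor(image_processor=TvltImageProcessor(), feature_extractor=TvltFeatureExtractor())
inputs = processor(audio=np.ones([12_000]), images=np.ones([3, 224, 224]))
print(sorted(inputs.keys()))  # audio_mask, audio_values, pixel_mask, pixel_values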
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE : Dict = {
"""configuration_rag""": ["""RagConfig"""],
"""retrieval_rag""": ["""RagRetriever"""],
"""tokenization_rag""": ["""RagTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Any = [
"""RagModel""",
"""RagPreTrainedModel""",
"""RagSequenceForGeneration""",
"""RagTokenForGeneration""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[str] = [
"""TFRagModel""",
"""TFRagPreTrainedModel""",
"""TFRagSequenceForGeneration""",
"""TFRagTokenForGeneration""",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
SCREAMING_SNAKE_CASE : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 102 |
"""simple docstring"""
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    """simple docstring"""
    n = str(n)
    return n == n[::-1]
def solution(limit: int = 1_000_000) -> int:
    """simple docstring"""
    # Sum all numbers below `limit` that are palindromic in base 10 and base 2.
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split('''b''')[1]):
            total += i
    return total
if __name__ == "__main__":
    print(solution(int(input().strip())))
| 102 | 1 |
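# Hedged sanity check for the double-base palindrome test above: 585 reads the
# same forwards and backwards in base 10 and in base 2 (0b1001001001).
assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])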
"""simple docstring"""
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class UpperCamelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = 10
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase = [1, 2, 3, 4]
UpperCAmelCase = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(snake_case__ , self.block_size , 0 ) , snake_case__ )
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(snake_case__ , self.block_size , 0 ) , snake_case__ )
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(snake_case__ , self.block_size , 0 ) , snake_case__ )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase = """It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this."""
UpperCAmelCase , UpperCAmelCase = process_story(snake_case__ )
self.assertEqual(snake_case__ , [] )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = """"""
UpperCAmelCase , UpperCAmelCase = process_story(snake_case__ )
self.assertEqual(snake_case__ , [] )
self.assertEqual(snake_case__ , [] )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = (
"""It was the year of Our Lord one thousand seven hundred and """
"""seventy-five\n\nSpiritual revelations were conceded to England """
"""at that favoured period, as at this.\n@highlight\n\nIt was the best of times"""
)
UpperCAmelCase , UpperCAmelCase = process_story(snake_case__ )
UpperCAmelCase = [
"""It was the year of Our Lord one thousand seven hundred and seventy-five.""",
"""Spiritual revelations were conceded to England at that favoured period, as at this.""",
]
self.assertEqual(snake_case__ , snake_case__ )
UpperCAmelCase = ["""It was the best of times."""]
self.assertEqual(snake_case__ , snake_case__ )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = torch.tensor([1, 2, 3, 4] )
UpperCAmelCase = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(snake_case__ , 0 ).numpy() , expected.numpy() )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(snake_case__ , 23 ).numpy() , expected.numpy() )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(snake_case__ , 1 ).numpy() , expected.numpy() )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = 1_01
UpperCAmelCase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_01, 5, 6], [1, 1_01, 3, 4, 1_01, 6]] )
UpperCAmelCase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
UpperCAmelCase = compute_token_type_ids(snake_case__ , snake_case__ )
np.testing.assert_array_equal(snake_case__ , snake_case__ )
| 248 |
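# Hedged reference implementations matching the behaviour the tests above
# exercise; these are sketches of utils_summarization, not the real module.
import torch
def truncate_or_pad_sketch(sequence, block_size, pad_token_id):
    # Clip to block_size, then right-pad with pad_token_id up to block_size.
    sequence = sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))
def build_mask_sketch(sequence, pad_token_id):
    # 1 for real tokens, 0 wherever the pad token appears.
    mask = torch.ones_like(sequence)
    mask[sequence == pad_token_id] = 0
    return mask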
"""simple docstring"""
import os
def largest_product(grid):
    '''simple docstring'''
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product
    return largest
def solution():
    '''simple docstring'''
    grid = []
    with open(os.path.dirname(__file__) + """/grid.txt""" ) as file:
        for line in file:
            grid.append(line.strip("""\n""" ).split(""" """ ) )
    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)
if __name__ == "__main__":
    print(solution())
| 248 | 1 |
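# Hedged sanity check for largest_product on a hypothetical 4x4 grid: the only
# four-cell run with a product above 2 is the main diagonal, 1*2*3*4 = 24.
assert largest_product(
    [[1, 1, 1, 1], [1, 2, 1, 1], [1, 1, 3, 1], [1, 1, 1, 4]]) == 24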
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : List[Any]=56 , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : str=99 , UpperCAmelCase__ : Tuple=32 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Union[str, Any]=7 , UpperCAmelCase__ : Optional[Any]="gelu_new" , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : str=512 , UpperCAmelCase__ : List[str]=16 , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : List[str]="block_sparse" , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : List[str]=2 , UpperCAmelCase__ : Union[str, Any]=3 , ) ->Union[str, Any]:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_attention_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_choices
A__ = rescale_embeddings
A__ = attention_type
A__ = use_bias
A__ = block_size
A__ = num_random_blocks
def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
'''simple docstring'''
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
A__ = None
if self.use_attention_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length])
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
A__ = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[int]:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ , A__ = config_and_inputs
A__ = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_flax
class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
'''simple docstring'''
A__ = FlaxBigBirdModelTester(self)
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->str:
'''simple docstring'''
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def SCREAMING_SNAKE_CASE ( self : Any) ->int:
'''simple docstring'''
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
'''simple docstring'''
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Dict:
'''simple docstring'''
super().test_hidden_states_output()
@slow
def SCREAMING_SNAKE_CASE ( self : Any) ->str:
'''simple docstring'''
for model_class_name in self.all_model_classes:
A__ = model_class_name.from_pretrained('''google/bigbird-roberta-base''')
self.assertIsNotNone(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Dict) ->Tuple:
'''simple docstring'''
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def SCREAMING_SNAKE_CASE ( self : str) ->str:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
A__ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__)
A__ = model_class(UpperCAmelCase__)
@jax.jit
def model_jitted(UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any=None , **UpperCAmelCase__ : Tuple):
return model(input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , **UpperCAmelCase__)
with self.subTest('''JIT Enabled'''):
A__ = model_jitted(**UpperCAmelCase__).to_tuple()
with self.subTest('''JIT Disabled'''):
with jax.disable_jit():
A__ = model_jitted(**UpperCAmelCase__).to_tuple()
self.assertEqual(len(UpperCAmelCase__) , len(UpperCAmelCase__))
for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__):
self.assertEqual(jitted_output.shape , output.shape)
def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict=1e-5 , UpperCAmelCase__ : Union[str, Any]="outputs" , UpperCAmelCase__ : Optional[int]=None) ->List[str]:
'''simple docstring'''
if name.startswith('''outputs.attentions'''):
return
else:
super().check_pt_flax_outputs(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
| 14 |
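# Hedged sketch of the JIT-equivalence check used in the Flax tests above: run
# a pure function with and without jax.jit and verify that the outputs match
# in shape and value. `f` is an illustrative stand-in for the model forward.
import jax
import jax.numpy as jnp
def f(x):
    return jnp.tanh(x) * 2.0
x = jnp.ones((2, 3))
eager = f(x)
jitted = jax.jit(f)(x)
assert eager.shape == jitted.shape
assert jnp.allclose(eager, jitted)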
MOD_ADLER = 65521
def adler32(plain_text: str) -> int:
    """simple docstring"""
    # Adler-32 checksum: `a` accumulates byte values, `b` accumulates running
    # sums of `a`; both stay reduced modulo 65521, the largest prime below 2**16.
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
| 14 | 1 |
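# Hedged cross-check for the pure-Python Adler-32 above: for ASCII input it
# should agree with zlib's C implementation (zlib.adler32 takes bytes).
import zlib
assert adler32("Wikipedia") == zlib.adler32(b"Wikipedia")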
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCamelCase_ ( metaclass=UpperCamelCase):
"""simple docstring"""
snake_case__ : Any = ["flax"]
def __init__( self : str , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Any ) -> str:
requires_backends(self , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls : List[str] , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : int ) -> Any:
requires_backends(cls , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls : Optional[int] , *UpperCAmelCase__ : str , **UpperCAmelCase__ : str ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class UpperCamelCase_ ( metaclass=UpperCamelCase):
"""simple docstring"""
snake_case__ : Any = ["flax"]
def __init__( self : Optional[Any] , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Dict ) -> Optional[int]:
requires_backends(self , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls : int , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Optional[Any] ) -> Dict:
requires_backends(cls , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls : List[Any] , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : List[Any] ) -> int:
requires_backends(cls , ["flax"] )
class UpperCamelCase_ ( metaclass=UpperCamelCase):
"""simple docstring"""
snake_case__ : int = ["flax"]
def __init__( self : str , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : List[str] ) -> int:
requires_backends(self , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls : int , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Optional[Any] ) -> Tuple:
requires_backends(cls , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls : int , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : str ) -> List[str]:
requires_backends(cls , ["flax"] )
class UpperCamelCase_ ( metaclass=UpperCamelCase):
"""simple docstring"""
snake_case__ : int = ["flax"]
def __init__( self : Optional[Any] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Tuple ) -> str:
requires_backends(self , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls : Tuple , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Tuple ) -> str:
requires_backends(cls , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls : List[str] , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : List[Any] ) -> Optional[int]:
requires_backends(cls , ["flax"] )
class UpperCamelCase_ ( metaclass=UpperCamelCase):
"""simple docstring"""
snake_case__ : List[str] = ["flax"]
def __init__( self : Union[str, Any] , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Optional[Any] ) -> List[Any]:
requires_backends(self , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls : Tuple , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : str ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls : int , *UpperCAmelCase__ : int , **UpperCAmelCase__ : Tuple ) -> List[Any]:
requires_backends(cls , ["flax"] )
class UpperCamelCase_ ( metaclass=UpperCamelCase):
"""simple docstring"""
snake_case__ : Dict = ["flax"]
def __init__( self : Any , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Optional[int] ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls : Any , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : Tuple ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls : int , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Any ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
class UpperCamelCase_ ( metaclass=UpperCamelCase):
"""simple docstring"""
snake_case__ : Union[str, Any] = ["flax"]
def __init__( self : Any , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Any ) -> Tuple:
requires_backends(self , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls : int , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Dict ) -> Optional[int]:
requires_backends(cls , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls : Optional[int] , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : int ) -> int:
requires_backends(cls , ["flax"] )
class UpperCamelCase_ ( metaclass=UpperCamelCase):
"""simple docstring"""
snake_case__ : Union[str, Any] = ["flax"]
def __init__( self : str , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : List[Any] ) -> List[str]:
requires_backends(self , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls : List[Any] , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Union[str, Any] ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls : Union[str, Any] , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : List[str] ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
class UpperCamelCase_ ( metaclass=UpperCamelCase):
"""simple docstring"""
snake_case__ : Dict = ["flax"]
def __init__( self : Union[str, Any] , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : List[Any] ) -> Any:
requires_backends(self , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls : List[str] , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Optional[int] ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls : Tuple , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Tuple ) -> List[Any]:
requires_backends(cls , ["flax"] )
class UpperCamelCase_ ( metaclass=UpperCamelCase):
"""simple docstring"""
snake_case__ : List[Any] = ["flax"]
def __init__( self : Any , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : int ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls : Any , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : List[Any] ) -> Any:
requires_backends(cls , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls : Union[str, Any] , *UpperCAmelCase__ : int , **UpperCAmelCase__ : Optional[int] ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class UpperCamelCase_ ( metaclass=UpperCamelCase):
"""simple docstring"""
snake_case__ : str = ["flax"]
def __init__( self : List[Any] , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : List[str] ) -> Tuple:
requires_backends(self , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls : Tuple , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : int ) -> str:
requires_backends(cls , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls : Tuple , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Any ) -> Any:
requires_backends(cls , ["flax"] )
class UpperCamelCase_ ( metaclass=UpperCamelCase):
"""simple docstring"""
snake_case__ : str = ["flax"]
def __init__( self : str , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : str ) -> Optional[int]:
requires_backends(self , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls : Optional[int] , *UpperCAmelCase__ : str , **UpperCAmelCase__ : Union[str, Any] ) -> Tuple:
requires_backends(cls , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Optional[Any] ) -> Any:
requires_backends(cls , ["flax"] )
class UpperCamelCase_ ( metaclass=UpperCamelCase):
"""simple docstring"""
snake_case__ : Any = ["flax"]
def __init__( self : Union[str, Any] , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Dict ) -> str:
requires_backends(self , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : str ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def UpperCAmelCase_ ( cls : Any , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Optional[Any] ) -> Tuple:
requires_backends(cls , ["flax"] )
| 195 |
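# Hedged sketch of the dummy-object pattern repeated above: when the flax
# backend is missing, placeholder classes raise a helpful error at use time
# instead of at import time. Minimal stand-alone equivalent with illustrative
# names, not the transformers implementation:
def requires_backends_sketch(obj, backends):
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    raise ImportError(f"{name} requires the {backends} backend(s) to be installed.")
class FlaxOnlyPlaceholder:
    _backends = ["flax"]
    def __init__(self, *args, **kwargs):
        requires_backends_sketch(self, ["flax"])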
"""simple docstring"""
def reverse_long_words(sentence: str) -> str:
    '''simple docstring'''
    # Reverse every word longer than four characters; shorter words pass through.
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split())
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(reverse_long_words('''Hey wollef sroirraw'''))
| 195 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] =AltDiffusionPipeline
__UpperCAmelCase : str =TEXT_TO_IMAGE_PARAMS
__UpperCAmelCase : Dict =TEXT_TO_IMAGE_BATCH_PARAMS
__UpperCAmelCase : Dict =TEXT_TO_IMAGE_IMAGE_PARAMS
__UpperCAmelCase : Any =TEXT_TO_IMAGE_IMAGE_PARAMS
def snake_case ( self ):
torch.manual_seed(0 )
__lowerCAmelCase = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
__lowerCAmelCase = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_02 , )
__lowerCAmelCase = CLIPTextModel(__a )
__lowerCAmelCase = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
__lowerCAmelCase = 77
__lowerCAmelCase = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def snake_case ( self , __a , __a=0 ):
if str(__a ).startswith("mps" ):
__lowerCAmelCase = torch.manual_seed(__a )
else:
__lowerCAmelCase = torch.Generator(device=__a ).manual_seed(__a )
__lowerCAmelCase = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def snake_case ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def snake_case ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def snake_case ( self ):
__lowerCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
torch.manual_seed(0 )
__lowerCAmelCase = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
__lowerCAmelCase = RobertaSeriesModelWithTransformation(__a )
__lowerCAmelCase = text_encoder
__lowerCAmelCase = AltDiffusionPipeline(**__a )
__lowerCAmelCase = alt_pipe.to(__a )
alt_pipe.set_progress_bar_config(disable=__a )
__lowerCAmelCase = self.get_dummy_inputs(__a )
__lowerCAmelCase = "A photo of an astronaut"
__lowerCAmelCase = alt_pipe(**__a )
__lowerCAmelCase = output.images
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase = np.array(
[0.5_7_4_8_1_6_2, 0.6_0_4_4_7_1_4_5, 0.4_8_8_2_1_2_1_7, 0.5_0_1_0_0_6_3_6, 0.5_4_3_1_1_8_5, 0.4_5_7_6_3_6_8_3, 0.4_9_6_5_7_6_9_6, 0.4_8_1_3_2_7_3_3, 0.4_7_5_7_3_0_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case ( self ):
__lowerCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = PNDMScheduler(skip_prk_steps=__a )
torch.manual_seed(0 )
__lowerCAmelCase = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
__lowerCAmelCase = RobertaSeriesModelWithTransformation(__a )
__lowerCAmelCase = text_encoder
__lowerCAmelCase = AltDiffusionPipeline(**__a )
__lowerCAmelCase = alt_pipe.to(__a )
alt_pipe.set_progress_bar_config(disable=__a )
__lowerCAmelCase = self.get_dummy_inputs(__a )
__lowerCAmelCase = alt_pipe(**__a )
__lowerCAmelCase = output.images
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase = np.array(
[0.5_1_6_0_5_0_9_3, 0.5_7_0_7_2_4_1, 0.4_7_3_6_5_5_0_7, 0.5_0_5_7_8_8_8_6, 0.5_6_3_3_8_7_7, 0.4_6_4_2_5_0_3, 0.5_1_8_2_0_8_1, 0.4_8_7_6_3_4_8_4, 0.4_9_0_8_4_2_3_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self ):
# make sure here that pndm scheduler skips prk
__lowerCAmelCase = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , safety_checker=__a )
__lowerCAmelCase = alt_pipe.to(__a )
alt_pipe.set_progress_bar_config(disable=__a )
__lowerCAmelCase = "A painting of a squirrel eating a burger"
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = alt_pipe([prompt] , generator=__a , guidance_scale=6.0 , num_inference_steps=20 , output_type="np" )
__lowerCAmelCase = output.images
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__lowerCAmelCase = np.array([0.1_0_1_0, 0.0_8_0_0, 0.0_7_9_4, 0.0_8_8_5, 0.0_8_4_3, 0.0_7_6_2, 0.0_7_6_9, 0.0_7_2_9, 0.0_5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case ( self ):
__lowerCAmelCase = DDIMScheduler.from_pretrained("BAAI/AltDiffusion" , subfolder="scheduler" )
__lowerCAmelCase = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , scheduler=__a , safety_checker=__a )
__lowerCAmelCase = alt_pipe.to(__a )
alt_pipe.set_progress_bar_config(disable=__a )
__lowerCAmelCase = "A painting of a squirrel eating a burger"
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = alt_pipe([prompt] , generator=__a , num_inference_steps=2 , output_type="numpy" )
__lowerCAmelCase = output.images
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__lowerCAmelCase = np.array([0.4_0_1_9, 0.4_0_5_2, 0.3_8_1_0, 0.4_1_1_9, 0.3_9_1_6, 0.3_9_8_2, 0.4_6_5_1, 0.4_1_9_5, 0.5_3_2_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 57 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
UpperCamelCase = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
UpperCamelCase = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode() -> dict:
    # Map every byte 0-255 to a printable unicode character: printable bytes
    # map to themselves, the rest are shifted into the 256+ range.
    bs = (
        list(range(ord('''!'''), ord('''~''') + 1)) + list(range(ord('''¡'''), ord('''¬''') + 1)) + list(range(ord('''®'''), ord('''ÿ''') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def get_pairs(word) -> set:
    # Return the set of adjacent symbol pairs in a word (a tuple of symbols).
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
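# Hedged illustration of the two helpers above: bytes_to_unicode assigns every
# byte a printable stand-in character, and get_pairs enumerates the adjacent
# symbol pairs that BPE merge ranks are looked up for.
byte_map = bytes_to_unicode()
print(byte_map[ord(" ")])  # the space byte maps to 'Ġ' in GPT-2/RoBERTa-style BPE
print(get_pairs(("h", "e", "l", "l", "o")))  # {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}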
class lowerCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : int = VOCAB_FILES_NAMES
UpperCamelCase_ : int = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : int = ["""input_ids""", """attention_mask"""]
def __init__( self : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str="replace" , SCREAMING_SNAKE_CASE_ : str="<s>" , SCREAMING_SNAKE_CASE_ : Any="</s>" , SCREAMING_SNAKE_CASE_ : int="</s>" , SCREAMING_SNAKE_CASE_ : List[Any]="<s>" , SCREAMING_SNAKE_CASE_ : str="<unk>" , SCREAMING_SNAKE_CASE_ : Dict="<pad>" , SCREAMING_SNAKE_CASE_ : Dict="<mask>" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False , **SCREAMING_SNAKE_CASE_ : Tuple , ) -> List[str]:
'''simple docstring'''
A: int = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else bos_token
A: Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else eos_token
A: int = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else sep_token
A: Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else cls_token
A: Any = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else unk_token
A: str = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
A: Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
super().__init__(
errors=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as vocab_handle:
A: str = json.load(SCREAMING_SNAKE_CASE_ )
A: str = {v: k for k, v in self.encoder.items()}
A: Union[str, Any] = errors # how to handle errors in decoding
A: Optional[int] = bytes_to_unicode()
A: Union[str, Any] = {v: k for k, v in self.byte_encoder.items()}
with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as merges_handle:
A: int = merges_handle.read().split('''\n''' )[1:-1]
A: str = [tuple(merge.split() ) for merge in bpe_merges]
A: Any = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
A: Union[str, Any] = {}
A: Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
A: Dict = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
    @property
    def vocab_size(self):
        '''simple docstring'''
        return len(self.encoder)

    def get_vocab(self):
        '''simple docstring'''
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Merge the adjacent pair with the lowest merge rank first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
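    # Illustrative trace (assumed merge ranks, not from the original vocabulary):
    # with bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1}, bpe("low") proceeds
    #   ("l", "o", "w") -> ("lo", "w") -> ("low",) and returns "low".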
    def _tokenize(self, text):
        '''simple docstring'''
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        '''simple docstring'''
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        '''simple docstring'''
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
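    # Resulting layout (RoBERTa-style, assuming <s>/</s> as the CLS/SEP tokens):
    #   single sequence:   <s> A </s>
    #   pair of sequences: <s> A </s></s> B </s>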
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        '''simple docstring'''
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
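# A minimal usage sketch (illustrative only -- the class name above is an obfuscated
# placeholder, and "vocab.json"/"merges.txt" are assumed GPT-2-style vocabulary files):
#   tokenizer = lowerCAmelCase_("vocab.json", "merges.txt")
#   ids = tokenizer("Hello world")["input_ids"]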
| 319 | 0 |
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
    from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MTaIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        """simple docstring"""
        model = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) | 31 |
'''simple docstring'''
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    '''simple docstring'''
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError(
            "Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)
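# Worked example (mass-action law: electron_conc * hole_conc == intrinsic_conc**2):
#   carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
#   -> ("intrinsic_conc", 50.0)
# The function name above is a reconstruction; the obfuscated source did not preserve it.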
if __name__ == "__main__":
    import doctest

    doctest.testmod() | 31 | 1 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class EncodecConfig(PretrainedConfig):
    """simple docstring"""

    model_type = """encodec"""

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''')
        super().__init__(**kwargs)
    @property
    def chunk_length(self):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self):
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self):
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
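    # Quick sanity check (values follow from the defaults above):
    #   hop_length = 8 * 5 * 4 * 2 = 320, so frame_rate = ceil(24000 / 320) = 75
    #   num_quantizers = int(1000 * 24.0 // (75 * 10)) = 32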
| 40 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = '''poolformer'''

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)
class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs(self):
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ])

    @property
    def atol_for_validation(self):
        return 2e-3
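    # The dynamic axes above let the exported ONNX graph accept any batch size and
    # spatial resolution; atol_for_validation (2e-3) is the absolute tolerance used
    # when comparing ONNX outputs against the reference PyTorch model.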
| 101 | 0 |
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = '''T5Config'''
def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id) -> jnp.ndarray:
    '''simple docstring'''
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    # Replace any -100 label padding with the real pad token id.
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
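# Worked example (assuming pad_token_id=0 and decoder_start_token_id=0):
#   shift_tokens_right([[5, 6, 7]], 0, 0) -> [[0, 5, 6]]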
class FlaxMTaModel(FlaxTaModel):
    """simple docstring"""

    model_type = '''mt5'''
    config_class = MTaConfig


class FlaxMTaEncoderModel(FlaxTaEncoderModel):
    """simple docstring"""

    model_type = '''mt5'''
    config_class = MTaConfig


class FlaxMTaForConditionalGeneration(FlaxTaForConditionalGeneration):
    """simple docstring"""

    model_type = '''mt5'''
    config_class = MTaConfig
| 360 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class CamembertConfig(PretrainedConfig):
    """simple docstring"""

    model_type = '''camembert'''

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
| 174 | 0 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            'こんにちは',
            'こん',
            'にちは',
            'ばんは',
            '##こん',
            '##にちは',
            '##ばんは',
            '世界',
            '##世界',
            '、',
            '##、',
            '。',
            '##。',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = 'こんにちは、世界。 \nこんばんは、世界。'
        output_text = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。')
        self.assertListEqual(tokens, ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type='mecab')
        self.assertIsNotNone(tokenizer)

        text = 'こんにちは、世界。\nこんばんは、世界。'
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, 'tokenizer.bin')
        with open(filename, 'wb') as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, 'rb') as handle:
            tokenizer_new = pickle.load(handle)
        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic='ipadic')
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '),
            ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'],
        )

    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic='unidic_lite')
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '),
            ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'],
        )

    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic='unidic')
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '),
            ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'],
        )

    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic='ipadic')
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '),
            ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'],
        )

    def test_mecab_tokenizer_with_option(self):
        try:
            # do_lower_case=False reconstructed from the expected output keeping "iPhone" cased.
            tokenizer = MecabTokenizer(
                do_lower_case=False, normalize_text=False, mecab_option='-d /usr/local/lib/mecab/dic/jumandic')
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '),
            ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'],
        )

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic='ipadic')
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '),
            ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'],
        )
    @require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type='sudachi')
        self.assertIsNotNone(tokenizer)

        text = 'こんにちは、世界。\nこんばんは、世界。'
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, 'tokenizer.bin')
        with open(filename, 'wb') as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, 'rb') as handle:
            tokenizer_new = pickle.load(handle)
        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)

    @require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type='core')
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '),
            [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '],
        )

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type='core', sudachi_split_mode='A')
        self.assertListEqual(tokenizer.tokenize('外国人参政権'), ['外国', '人', '参政', '権'])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type='core', sudachi_split_mode='B')
        self.assertListEqual(tokenizer.tokenize('外国人参政権'), ['外国人', '参政権'])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type='core', sudachi_split_mode='C')
        self.assertListEqual(tokenizer.tokenize('外国人参政権'), ['外国人参政権'])

    @require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type='core')
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '),
            [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '],
        )

    @require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type='core')
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '),
            [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '],
        )

    @require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type='core')
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '),
            ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'],
        )

    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type='jumanpp')
        self.assertIsNotNone(tokenizer)

        text = 'こんにちは、世界。\nこんばんは、世界。'
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, 'tokenizer.bin')
        with open(filename, 'wb') as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, 'rb') as handle:
            tokenizer_new = pickle.load(handle)
        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '),
            ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '),
            ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '),
            ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '),
            ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。'),
            ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'],
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token='[UNK]')

        self.assertListEqual(tokenizer.tokenize(''), [])
        self.assertListEqual(tokenizer.tokenize('こんにちは'), ['こんにちは'])
        self.assertListEqual(tokenizer.tokenize('こんばんは'), ['こん', '##ばんは'])
        self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは'), ['こん', '##ばんは', '[UNK]', 'こんにちは'])

    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp')
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。')
        self.assertListEqual(tokens, ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'])

        tokens = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは')
        self.assertListEqual(tokens, ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese')

        text = tokenizer.encode('ありがとう。', add_special_tokens=False)
        text_2 = tokenizer.encode('どういたしまして。', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
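    # Illustrative layout of the pair encoding checked above (assuming the pretrained
    # vocab maps "[CLS]" -> 2 and "[SEP]" -> 3):
    #   [CLS] ありがとう 。 [SEP] どういたしまして 。 [SEP]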
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type='character', **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'こんにちは、世界。 \nこんばんは、世界。'
        output_text = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
        return input_text, output_text

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type='character')
        tokens = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。')
        self.assertListEqual(
            tokens, ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12])

    def test_character_tokenizer(self):
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token='[UNK]')

        self.assertListEqual(tokenizer.tokenize(''), [])
        self.assertListEqual(tokenizer.tokenize('こんにちは'), ['こ', 'ん', 'に', 'ち', 'は'])
        self.assertListEqual(tokenizer.tokenize('こんにちほ'), ['こ', 'ん', 'に', 'ち', '[UNK]'])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char')

        text = tokenizer.encode('ありがとう。', add_special_tokens=False)
        text_2 = tokenizer.encode('どういたしまして。', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = 'cl-tohoku/bert-base-japanese'
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)


class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = 'cl-tohoku/bert-base-japanese'
        with self.assertLogs('transformers', level='WARNING') as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    'The tokenizer class you load from this checkpoint is not the same type as the class this function'
                    ' is called from.'))
        EXAMPLE_BERT_ID = 'bert-base-cased'
        with self.assertLogs('transformers', level='WARNING') as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    'The tokenizer class you load from this checkpoint is not the same type as the class this function'
                    ' is called from.'))
| 32 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            'feature-extraction': OpenAIGPTModel,
            'text-classification': OpenAIGPTForSequenceClassification,
            'text-generation': OpenAIGPTLMHeadModel,
            'zero-shot': OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict['input_ids'] = inputs_dict['labels']
                inputs_dict['token_type_ids'] = inputs_dict['labels']
                inputs_dict['mc_token_ids'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict['mc_labels'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt')
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
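    # Note: with do_sample=False, generate() decodes greedily, so the exact ids above
    # are deterministic for the pinned "openai-gpt" checkpoint.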
| 32 | 1 |
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected | 163 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_xlm_roberta": [
        "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaConfig",
        "XLMRobertaOnnxConfig",
    ],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm_roberta"] = [
        "XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaForCausalLM",
        "XLMRobertaForMaskedLM",
        "XLMRobertaForMultipleChoice",
        "XLMRobertaForQuestionAnswering",
        "XLMRobertaForSequenceClassification",
        "XLMRobertaForTokenClassification",
        "XLMRobertaModel",
        "XLMRobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
        "TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMRobertaForCausalLM",
        "TFXLMRobertaForMaskedLM",
        "TFXLMRobertaForMultipleChoice",
        "TFXLMRobertaForQuestionAnswering",
        "TFXLMRobertaForSequenceClassification",
        "TFXLMRobertaForTokenClassification",
        "TFXLMRobertaModel",
        "TFXLMRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
        "FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxXLMRobertaForMaskedLM",
        "FlaxXLMRobertaForCausalLM",
        "FlaxXLMRobertaForMultipleChoice",
        "FlaxXLMRobertaForQuestionAnswering",
        "FlaxXLMRobertaForSequenceClassification",
        "FlaxXLMRobertaForTokenClassification",
        "FlaxXLMRobertaModel",
        "FlaxXLMRobertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm_roberta import (
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaConfig,
        XLMRobertaOnnxConfig,
    )

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta import XLMRobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta import (
            XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaForCausalLM,
            XLMRobertaForMaskedLM,
            XLMRobertaForMultipleChoice,
            XLMRobertaForQuestionAnswering,
            XLMRobertaForSequenceClassification,
            XLMRobertaForTokenClassification,
            XLMRobertaModel,
            XLMRobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm_roberta import (
            TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMRobertaForCausalLM,
            TFXLMRobertaForMaskedLM,
            TFXLMRobertaForMultipleChoice,
            TFXLMRobertaForQuestionAnswering,
            TFXLMRobertaForSequenceClassification,
            TFXLMRobertaForTokenClassification,
            TFXLMRobertaModel,
            TFXLMRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xlm_roberta import (
            FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxXLMRobertaForCausalLM,
            FlaxXLMRobertaForMaskedLM,
            FlaxXLMRobertaForMultipleChoice,
            FlaxXLMRobertaForQuestionAnswering,
            FlaxXLMRobertaForSequenceClassification,
            FlaxXLMRobertaForTokenClassification,
            FlaxXLMRobertaModel,
            FlaxXLMRobertaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 163 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '\\n\n'
_DESCRIPTION = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
_KWARGS_DESCRIPTION = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : int = 16 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[int]=None ):
"""simple docstring"""
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
UpperCAmelCase__ = """cuda"""
else:
UpperCAmelCase__ = """cuda""" if torch.cuda.is_available() else """cpu"""
UpperCAmelCase__ = AutoModelForCausalLM.from_pretrained(_UpperCAmelCase )
UpperCAmelCase__ = model.to(_UpperCAmelCase )
UpperCAmelCase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
UpperCAmelCase__ = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(_UpperCAmelCase ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
UpperCAmelCase__ = model.config.max_length - 1
else:
UpperCAmelCase__ = model.config.max_length
UpperCAmelCase__ = tokenizer(
_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors="""pt""" , return_attention_mask=_UpperCAmelCase , ).to(_UpperCAmelCase )
UpperCAmelCase__ = encodings["""input_ids"""]
UpperCAmelCase__ = encodings["""attention_mask"""]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
UpperCAmelCase__ = []
UpperCAmelCase__ = CrossEntropyLoss(reduction="""none""" )
for start_index in logging.tqdm(range(0 , len(_UpperCAmelCase ) , _UpperCAmelCase ) ):
UpperCAmelCase__ = min(start_index + batch_size , len(_UpperCAmelCase ) )
UpperCAmelCase__ = encoded_texts[start_index:end_index]
UpperCAmelCase__ = attn_masks[start_index:end_index]
if add_start_token:
UpperCAmelCase__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_UpperCAmelCase )
UpperCAmelCase__ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
UpperCAmelCase__ = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(_UpperCAmelCase ), attn_mask] , dim=1 )
UpperCAmelCase__ = encoded_batch
with torch.no_grad():
UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase ).logits
UpperCAmelCase__ = out_logits[..., :-1, :].contiguous()
UpperCAmelCase__ = labels[..., 1:].contiguous()
UpperCAmelCase__ = attn_mask[..., 1:].contiguous()
                UpperCAmelCase__ = torch.exp(
(loss_fct(shift_logits.transpose(1 , 2 ) , _UpperCAmelCase ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(_UpperCAmelCase )}
| 346 |
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
UpperCAmelCase_ = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
UpperCAmelCase_ = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
UpperCAmelCase_ = '\nCalculates how good predictions are given some references, using pass@k scores\nArgs:\n    predictions: list of candidates to evaluate. Each candidate should be a list\n        of strings with several code candidates to solve the problem.\n    references: a list with a test for each prediction. Each test should evaluate the\n        correctness of a code candidate.\n    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n    num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n    timeout: maximum time in seconds allowed for each candidate program to run (Default: 3.0).\nReturns:\n    pass_at_k: dict with pass rates for each k\n    results: dict with granular results of each unittest\nExamples:\n    >>> code_eval = datasets.load_metric("code_eval")\n    >>> test_cases = ["assert add(2,3)==5"]\n    >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n    >>> print(pass_at_k)\n    {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
UpperCAmelCase_ = '\n################################################################################\n                                  !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
UpperCAmelCase_ = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str]=[1, 10, 1_00] , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : Any=3.0 ):
"""simple docstring"""
if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("""This metric is currently not supported on Windows.""" )
with ThreadPoolExecutor(max_workers=_UpperCAmelCase ) as executor:
UpperCAmelCase__ = []
UpperCAmelCase__ = Counter()
UpperCAmelCase__ = 0
UpperCAmelCase__ = defaultdict(_UpperCAmelCase )
for task_id, (candidates, test_case) in enumerate(zip(_UpperCAmelCase , _UpperCAmelCase ) ):
for candidate in candidates:
UpperCAmelCase__ = candidate + """\n""" + test_case
UpperCAmelCase__ = (test_program, timeout, task_id, completion_id[task_id])
UpperCAmelCase__ = executor.submit(_UpperCAmelCase , *_UpperCAmelCase )
futures.append(_UpperCAmelCase )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(_UpperCAmelCase ):
UpperCAmelCase__ = future.result()
results[result["task_id"]].append((result["""completion_id"""], result) )
UpperCAmelCase__ , UpperCAmelCase__ = [], []
for result in results.values():
result.sort()
UpperCAmelCase__ = [r[1]["""passed"""] for r in result]
total.append(len(_UpperCAmelCase ) )
correct.append(sum(_UpperCAmelCase ) )
UpperCAmelCase__ = np.array(_UpperCAmelCase )
UpperCAmelCase__ = np.array(_UpperCAmelCase )
UpperCAmelCase__ = k
UpperCAmelCase__ = {f'''pass@{k}''': estimate_pass_at_k(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
def estimator(SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase__ = itertools.repeat(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) )
else:
assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = iter(SCREAMING_SNAKE_CASE__ )
return np.array([estimator(int(SCREAMING_SNAKE_CASE__ ) , int(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) for n, c in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )] )
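# --- Illustrative sanity check (not part of the metric): the product form used by the
# estimator above, 1 - prod_{i=n-c+1}^{n} (1 - k/i), equals the closed form
# 1 - C(n-c, k) / C(n, k). The numbers below are hypothetical.
if __name__ == "__main__":
    from math import comb

    import numpy as np

    n, c, k = 10, 3, 2  # 10 samples drawn, 3 of them correct, evaluating pass@2
    prod_form = 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))
    comb_form = 1.0 - comb(n - c, k) / comb(n, k)
    assert abs(prod_form - comb_form) < 1e-12  # both forms agree
    print(round(prod_form, 4))  # 0.5333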
| 346 | 1 |
"""simple docstring"""
__lowercase = 0 # The first color of the flag.
__lowercase = 1 # The second color of the flag.
__lowercase = 2 # The third color of the flag.
__lowercase = (red, white, blue)
def lowerCAmelCase (__UpperCamelCase : list ):
"""simple docstring"""
if not sequence:
return []
if len(__UpperCamelCase ) == 1:
return list(__UpperCamelCase )
__UpperCamelCase =0
__UpperCamelCase =len(__UpperCamelCase ) - 1
__UpperCamelCase =0
while mid <= high:
if sequence[mid] == colors[0]:
__UpperCamelCase , __UpperCamelCase =sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
__UpperCamelCase , __UpperCamelCase =sequence[high], sequence[mid]
high -= 1
else:
__UpperCamelCase =F"""The elements inside the sequence must contains only {colors} values"""
raise ValueError(__UpperCamelCase )
return sequence
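# Worked example for the three-way partition above: a single left-to-right pass keeps
# zeros before `low`, the unclassified region between `mid` and `high`, and twos after
# `high`, giving O(n) time and O(1) extra space.
#   dutch_national_flag_sort([2, 0, 1, 2, 0, 1])  ->  [0, 0, 1, 1, 2, 2]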
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowercase = input('''Enter numbers separated by commas:\n''').strip()
__lowercase = [int(item.strip()) for item in user_input.split(''',''')]
print(f'''{dutch_national_flag_sort(unsorted)}''')
| 353 | """simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class _lowercase :
"""simple docstring"""
def __init__( self : int , UpperCamelCase__ : Any ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase =data
__UpperCamelCase =None
class _lowercase :
"""simple docstring"""
def __init__( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase =None
__UpperCamelCase =None
def __iter__( self : int ) -> Iterator[Any]:
'''simple docstring'''
__UpperCamelCase =self.head
while self.head:
yield node.data
__UpperCamelCase =node.next
if node == self.head:
break
def __len__( self : Union[str, Any] ) -> int:
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self : str ) -> Union[str, Any]:
'''simple docstring'''
return "->".join(str(UpperCamelCase__ ) for item in iter(self ) )
def UpperCAmelCase_ ( self : List[str] , UpperCamelCase__ : Any ) -> None:
'''simple docstring'''
self.insert_nth(len(self ) , UpperCamelCase__ )
def UpperCAmelCase_ ( self : Optional[int] , UpperCamelCase__ : Any ) -> None:
'''simple docstring'''
self.insert_nth(0 , UpperCamelCase__ )
def UpperCAmelCase_ ( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Any ) -> None:
'''simple docstring'''
if index < 0 or index > len(self ):
raise IndexError('''list index out of range.''' )
__UpperCamelCase =Node(UpperCamelCase__ )
if self.head is None:
__UpperCamelCase =new_node # first node points itself
__UpperCamelCase =__UpperCamelCase =new_node
elif index == 0: # insert at head
__UpperCamelCase =self.head
__UpperCamelCase =__UpperCamelCase =new_node
else:
__UpperCamelCase =self.head
for _ in range(index - 1 ):
__UpperCamelCase =temp.next
__UpperCamelCase =temp.next
__UpperCamelCase =new_node
if index == len(self ) - 1: # insert at tail
__UpperCamelCase =new_node
def UpperCAmelCase_ ( self : Any ) -> Any:
'''simple docstring'''
return self.delete_nth(0 )
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def UpperCAmelCase_ ( self : int , UpperCamelCase__ : int = 0 ) -> Any:
'''simple docstring'''
if not 0 <= index < len(self ):
raise IndexError('''list index out of range.''' )
__UpperCamelCase =self.head
if self.head == self.tail: # just one node
__UpperCamelCase =__UpperCamelCase =None
elif index == 0: # delete head node
__UpperCamelCase =self.tail.next.next
__UpperCamelCase =self.head.next
else:
__UpperCamelCase =self.head
for _ in range(index - 1 ):
__UpperCamelCase =temp.next
__UpperCamelCase =temp.next
__UpperCamelCase =temp.next.next
if index == len(self ) - 1: # delete at tail
__UpperCamelCase =temp
return delete_node.data
def UpperCAmelCase_ ( self : str ) -> bool:
'''simple docstring'''
return len(self ) == 0
def lowerCAmelCase ():
"""simple docstring"""
__UpperCamelCase =CircularLinkedList()
assert len(__UpperCamelCase ) == 0
assert circular_linked_list.is_empty() is True
assert str(__UpperCamelCase ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(__UpperCamelCase ) == i
circular_linked_list.insert_nth(__UpperCamelCase , i + 1 )
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 | 0 |
import math
def a__ ( _UpperCamelCase : List[Any] ):
__lowerCamelCase = [True] * n
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = True
for i in range(3 ,int(n**0.5 + 1 ) ,2 ):
__lowerCamelCase = i * 2
while index < n:
__lowerCamelCase = False
__lowerCamelCase = index + i
__lowerCamelCase = [2]
for i in range(3 ,_UpperCamelCase ,2 ):
if is_prime[i]:
primes.append(_UpperCamelCase )
return primes
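# Example for the odd-only sieve above (2 is appended up front, even indices are skipped):
#   prime_sieve(30)  ->  [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]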
def a__ ( _UpperCamelCase : List[Any] = 99_99_66_66_33_33 ):
__lowerCamelCase = math.floor(math.sqrt(_UpperCamelCase ) ) + 1_00
__lowerCamelCase = prime_sieve(_UpperCamelCase )
__lowerCamelCase = 0
__lowerCamelCase = 0
__lowerCamelCase = primes[prime_index]
while (last_prime**2) <= limit:
__lowerCamelCase = primes[prime_index + 1]
__lowerCamelCase = last_prime**2
__lowerCamelCase = next_prime**2
# Get numbers divisible by lps(current)
__lowerCamelCase = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
__lowerCamelCase = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
__lowerCamelCase = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
__lowerCamelCase = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 330 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCAmelCase ( _a, unittest.TestCase ):
lowerCamelCase_ : Any = BioGptTokenizer
lowerCamelCase_ : Optional[Any] = False
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case_ : Optional[Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
snake_case_ : Union[str, Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
snake_case_ : Union[str, Any] = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
snake_case_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(__magic_name__ ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(__magic_name__ ) )
def lowerCamelCase (self , __magic_name__ ) -> int:
'''simple docstring'''
snake_case_ : str = '''lower newer'''
snake_case_ : Dict = '''lower newer'''
return input_text, output_text
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = BioGptTokenizer(self.vocab_file , self.merges_file )
snake_case_ : Union[str, Any] = '''lower'''
snake_case_ : Optional[int] = ['''low''', '''er</w>''']
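        # Greedy application of the merges above ("l o", "lo w", "e r</w>"):
        # "lower" -> l o w e r</w> -> lo w e r</w> -> low e r</w> -> low er</w>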
snake_case_ : Any = tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
snake_case_ : Optional[int] = tokens + ['''<unk>''']
snake_case_ : List[str] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
@slow
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
snake_case_ : List[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__magic_name__ )
snake_case_ : str = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__magic_name__ )
snake_case_ : str = tokenizer.build_inputs_with_special_tokens(__magic_name__ )
snake_case_ : List[str] = tokenizer.build_inputs_with_special_tokens(__magic_name__ , __magic_name__ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 279 | 0 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
__SCREAMING_SNAKE_CASE :List[Any] = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
__SCREAMING_SNAKE_CASE :Union[str, Any] = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def UpperCAmelCase_ ( __lowercase : list[list[int]] ) -> list[list[int]]:
'''simple docstring'''
_UpperCAmelCase = []
for i in range(len(__lowercase ) ):
_UpperCAmelCase = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
_UpperCAmelCase = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(__lowercase ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(__lowercase ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(__lowercase ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
_UpperCAmelCase = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
                or (not alive and neighbour_count == 3)
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(__lowercase )
return next_generation
def UpperCAmelCase_ ( __lowercase : list[list[int]] , __lowercase : int ) -> list[Image.Image]:
'''simple docstring'''
_UpperCAmelCase = []
for _ in range(__lowercase ):
# Create output image
_UpperCAmelCase = Image.new("RGB" , (len(cells[0] ), len(__lowercase )) )
_UpperCAmelCase = img.load()
# Save cells to image
for x in range(len(__lowercase ) ):
for y in range(len(cells[0] ) ):
_UpperCAmelCase = 255 - cells[y][x] * 255
_UpperCAmelCase = (colour, colour, colour)
# Save image
images.append(__lowercase )
_UpperCAmelCase = new_generation(__lowercase )
return images
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE :str = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
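# --- Illustrative sketch (separate from the code above): a compact, self-contained
# reimplementation of the same B3/S23 update rule, applied to the blinker. Names here
# are illustrative only.
if __name__ == "__main__":
    def step(cells: list[list[int]]) -> list[list[int]]:
        n, m = len(cells), len(cells[0])
        nxt = []
        for i in range(n):
            row = []
            for j in range(m):
                # count the up-to-eight live neighbours that fall inside the grid
                live = sum(
                    cells[x][y]
                    for x in range(max(0, i - 1), min(n, i + 2))
                    for y in range(max(0, j - 1), min(m, j + 2))
                    if (x, y) != (i, j)
                )
                row.append(1 if live == 3 or (cells[i][j] == 1 and live == 2) else 0)
            nxt.append(row)
        return nxt

    # the vertical bar becomes a horizontal bar (period-2 oscillator)
    print(step([[0, 1, 0], [0, 1, 0], [0, 1, 0]]))  # [[0, 0, 0], [1, 1, 1], [0, 0, 0]]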
| 363 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE :Dict = {
'''configuration_upernet''': ['''UperNetConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE :Optional[int] = [
'''UperNetForSemanticSegmentation''',
'''UperNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
__SCREAMING_SNAKE_CASE :List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 156 | 0 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
_lowerCamelCase : int = logging.get_logger(__name__)
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : Dict , *UpperCAmelCase__ : str , **UpperCAmelCase__ : int) ->None:
'''simple docstring'''
warnings.warn(
'''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use BeitImageProcessor instead.''' , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__)
| 14 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=4 , ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = parent
UpperCAmelCase__ : Any = batch_size
UpperCAmelCase__ : Optional[int] = seq_length
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : int = use_attention_mask
UpperCAmelCase__ : Any = use_token_type_ids
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : Union[str, Any] = vocab_size
UpperCAmelCase__ : str = hidden_size
UpperCAmelCase__ : List[Any] = num_hidden_layers
UpperCAmelCase__ : Optional[Any] = num_attention_heads
UpperCAmelCase__ : str = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase__ : str = attention_probs_dropout_prob
UpperCAmelCase__ : str = max_position_embeddings
UpperCAmelCase__ : Union[str, Any] = type_vocab_size
UpperCAmelCase__ : Dict = type_sequence_label_size
UpperCAmelCase__ : Optional[int] = initializer_range
UpperCAmelCase__ : int = num_choices
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : int = None
if self.use_attention_mask:
UpperCAmelCase__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : List[Any] = None
if self.use_token_type_ids:
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ : Optional[Any] = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : int = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Tuple = config_and_inputs
UpperCAmelCase__ : Dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = config_and_inputs
UpperCAmelCase__ : Dict = True
UpperCAmelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class lowerCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : str = FlaxRobertaModelTester(self )
@slow
def _a (self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
UpperCAmelCase__ : Dict = model_class_name.from_pretrained("""roberta-base""" , from_pt=_lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowerCamelCase )
| 171 | 0 |
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__snake_case = 16
__snake_case = 32
def __lowerCAmelCase ( lowercase : Dict ) -> Tuple:
"""simple docstring"""
return int(x / 2**20 )
class _lowerCAmelCase :
def __enter__( self ) -> Tuple:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
snake_case : Any = torch.cuda.memory_allocated()
return self
def __exit__( self , *UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
snake_case : Union[str, Any] = torch.cuda.memory_allocated()
snake_case : List[str] = torch.cuda.max_memory_allocated()
snake_case : List[str] = bamb(self.end - self.begin )
snake_case : Dict = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def __lowerCAmelCase ( lowercase : Accelerator , lowercase : int = 16 , lowercase : str = "bert-base-cased" , lowercase : int = 320 , lowercase : int = 160 , ) -> str:
"""simple docstring"""
snake_case : Dict = AutoTokenizer.from_pretrained(lowercase )
snake_case : Any = load_dataset(
"glue" , "mrpc" , split={"train": F'train[:{n_train}]', "validation": F'validation[:{n_val}]'} )
def tokenize_function(lowercase : Optional[Any] ):
# max_length=None => use the model max length (it's actually the default)
snake_case : Optional[int] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowercase , max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
snake_case : List[Any] = datasets.map(
lowercase , batched=lowercase , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=lowercase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case : Optional[Any] = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(lowercase : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(lowercase , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
snake_case : List[str] = DataLoader(
tokenized_datasets["train"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
snake_case : List[Any] = DataLoader(
tokenized_datasets["validation"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
return train_dataloader, eval_dataloader
def __lowerCAmelCase ( lowercase : Union[str, Any] , lowercase : List[str] ) -> Optional[Any]:
"""simple docstring"""
snake_case : Any = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case : Union[str, Any] = config["lr"]
snake_case : List[Any] = int(config["num_epochs"] )
snake_case : Union[str, Any] = int(config["seed"] )
snake_case : List[Any] = int(config["batch_size"] )
snake_case : Optional[Any] = args.model_name_or_path
set_seed(lowercase )
snake_case ,snake_case : Tuple = get_dataloaders(lowercase , lowercase , lowercase , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case : Any = AutoModelForSequenceClassification.from_pretrained(lowercase , return_dict=lowercase )
# Instantiate optimizer
snake_case : str = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
snake_case : Any = optimizer_cls(params=model.parameters() , lr=lowercase )
if accelerator.state.deepspeed_plugin is not None:
snake_case : Dict = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
snake_case : List[str] = 1
snake_case : str = (len(lowercase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
snake_case : Any = get_linear_schedule_with_warmup(
optimizer=lowercase , num_warmup_steps=0 , num_training_steps=lowercase , )
else:
snake_case : List[str] = DummyScheduler(lowercase , total_num_steps=lowercase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case ,snake_case ,snake_case ,snake_case ,snake_case : Dict = accelerator.prepare(
lowercase , lowercase , lowercase , lowercase , lowercase )
# We need to keep track of how many total steps we have iterated over
snake_case : Optional[int] = 0
# We also need to keep track of the stating epoch so files are named properly
snake_case : Optional[Any] = 0
# Now we train the model
snake_case : int = {}
for epoch in range(lowercase , lowercase ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(lowercase ):
snake_case : Optional[Any] = model(**lowercase )
snake_case : List[Any] = outputs.loss
snake_case : Any = loss / gradient_accumulation_steps
accelerator.backward(lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin ) ) )
accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used ) )
accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked ) )
accelerator.print(
"Total Peak Memory consumed during the train (max): {}".format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
snake_case : Union[str, Any] = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F'epoch-{epoch}'] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "peak_memory_utilization.json" ) , "w" ) as f:
json.dump(lowercase , lowercase )
def __lowerCAmelCase ( ) -> str:
"""simple docstring"""
snake_case : str = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=lowercase , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=lowercase , )
parser.add_argument(
"--output_dir" , type=lowercase , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--peak_memory_upper_bound" , type=lowercase , default=lowercase , help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value." , )
parser.add_argument(
"--n_train" , type=lowercase , default=320 , help="Number of training examples to use." , )
parser.add_argument(
"--n_val" , type=lowercase , default=160 , help="Number of validation examples to use." , )
parser.add_argument(
"--num_epochs" , type=lowercase , default=1 , help="Number of train epochs." , )
snake_case : int = parser.parse_args()
snake_case : str = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(lowercase , lowercase )
if __name__ == "__main__":
main()
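# Measurement pattern used above, in brief: TorchTracemalloc snapshots
# torch.cuda.memory_allocated() on entry and, on exit, reports the delta (`used`)
# and the peak minus the starting point (`peaked`), both converted to MB via
# int(bytes / 2**20). For example, int((3 * 2**20 + 123) / 2**20) == 3.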
| 112 |
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__snake_case = {
"""/attention/""": """/0/SelfAttention/""",
"""/self_attention/""": """/0/SelfAttention/""",
"""/encoder_decoder_attention/""": """/1/EncDecAttention/""",
"""value""": """v""",
"""query""": """q""",
"""key""": """k""",
"""out""": """o""",
"""pre_self_attention_layer_norm""": """0/layer_norm""",
"""pre_cross_attention_layer_norm""": """1/layer_norm""",
"""pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong
"""token_embedder""": """shared""",
"""encoder_norm""": """final_layer_norm""",
"""decoder_norm""": """final_layer_norm""",
"""relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""",
"""router/router_weights/w/""": """router/classifier/""",
"""roer/roer_weights/w/""": """router/classifier/""",
"""logits_dense""": """lm_head""",
}
def __lowerCAmelCase ( lowercase : Optional[int] ) -> List[str]:
"""simple docstring"""
snake_case : Optional[Any] = list(s_dict.keys() )
for key in keys:
snake_case : Any = R".*/layers_(\d+)"
snake_case : Tuple = key
if re.match(lowercase , lowercase ):
snake_case : List[str] = re.sub(R"layers_(\d+)" , R"block/\1/layer" , lowercase )
snake_case : Union[str, Any] = R"(encoder|decoder)\/"
if re.match(lowercase , lowercase ):
snake_case : Any = re.match(lowercase , lowercase ).groups()
if groups[0] == "encoder":
snake_case : Union[str, Any] = re.sub(R"/mlp/" , R"/1/mlp/" , lowercase )
snake_case : int = re.sub(R"/pre_mlp_layer_norm/" , R"/1/layer_norm/" , lowercase )
elif groups[0] == "decoder":
snake_case : str = re.sub(R"/mlp/" , R"/2/mlp/" , lowercase )
snake_case : List[str] = re.sub(R"/pre_mlp_layer_norm/" , R"/2/layer_norm/" , lowercase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
snake_case : int = new_key.replace(lowercase , lowercase )
print(F'{key} -> {new_key}' )
snake_case : Optional[Any] = s_dict.pop(lowercase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
snake_case : int = s_dict[
"encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
snake_case : Optional[Any] = s_dict[
"decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
snake_case : Tuple = s_dict[key].shape[0]
snake_case : int = s_dict[key]
for idx in range(lowercase ):
snake_case : List[str] = expert_weihts[idx]
print(F'{key} -> {key.replace("expert/" , "nested fstring" )}' )
s_dict.pop(lowercase )
return s_dict
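# Example of the layer renaming performed above: the `layers_<n>` segment is rewritten
# to the `block/<n>/layer` scheme with a backreference, e.g.
#   re.sub(r"layers_(\d+)", r"block/\1/layer", "encoder/layers_3/attention")
#     -> "encoder/block/3/layer/attention"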
__snake_case = {
"""NUM_ENCODER_LAYERS""": """num_layers""",
"""NUM_DECODER_LAYERS""": """num_decoder_layers""",
"""NUM_HEADS""": """num_heads""",
"""HEAD_DIM""": """d_kv""",
"""EMBED_DIM""": """d_model""",
"""MLP_DIM""": """d_ff""",
"""NUM_SELECTED_EXPERTS""": """num_selected_experts""",
"""NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
"""NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
"""dense.MlpBlock.activations""": """feed_forward_proj""",
}
def __lowerCAmelCase ( lowercase : Dict , lowercase : Optional[Any] ) -> int:
"""simple docstring"""
import regex as re
with open(lowercase , "r" ) as f:
snake_case : List[str] = f.read()
snake_case : Tuple = re.findall(R"(.*) = ([0-9.]*)" , lowercase )
snake_case : Any = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
snake_case : Tuple = float(lowercase ) if "." in value else int(lowercase )
snake_case : List[str] = re.findall(R"(.*activations) = \(\'(.*)\',\)" , lowercase )[0]
snake_case : List[Any] = str(activation[1] )
snake_case : Optional[Any] = num_experts
snake_case : List[Any] = SwitchTransformersConfig(**lowercase )
return config
def __lowerCAmelCase ( lowercase : Tuple , lowercase : Tuple , lowercase : Union[str, Any]=None , lowercase : Any="./" , lowercase : int=8 ) -> Dict:
"""simple docstring"""
print(F'Loading flax weights from : {flax_checkpoint_path}' )
    snake_case : Union[str, Any] = checkpoints.load_t5x_checkpoint(lowercase )
if gin_file is not None:
snake_case : List[str] = convert_gin_to_config(lowercase , lowercase )
else:
snake_case : str = SwitchTransformersConfig.from_pretrained(lowercase )
snake_case : Any = SwitchTransformersForConditionalGeneration(lowercase )
snake_case : Optional[Any] = flax_params["target"]
snake_case : Optional[int] = flatten_dict(lowercase , sep="/" )
snake_case : Optional[Any] = rename_keys(lowercase )
snake_case : List[str] = unflatten_dict(lowercase , sep="/" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(lowercase , lowercase )
print(F'Save PyTorch model to {pytorch_dump_path}' )
pt_model.save_pretrained(lowercase )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"""
""" model architecture. If not provided, a `gin_file` has to be provided."""
),
)
parser.add_argument(
"""--gin_file""",
default=None,
type=str,
required=False,
help="""Path to the gin config file. If not provided, a `config_file` has to be passed """,
)
parser.add_argument(
"""--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model."""
)
parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""")
__snake_case = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 112 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A_ :
'''simple docstring'''
def __init__( self : Any , lowercase_ : List[Any] , lowercase_ : int=3 , lowercase_ : Dict=32 , lowercase_ : Optional[Any]=3 , lowercase_ : Tuple=10 , lowercase_ : Optional[Any]=[10, 20, 30, 40] , lowercase_ : List[str]=[1, 1, 2, 1] , lowercase_ : Optional[int]=True , lowercase_ : str=True , lowercase_ : Dict="relu" , lowercase_ : Optional[Any]=3 , lowercase_ : List[str]=None , ) -> int:
UpperCAmelCase : Dict = parent
UpperCAmelCase : Dict = batch_size
UpperCAmelCase : Any = image_size
UpperCAmelCase : Any = num_channels
UpperCAmelCase : List[str] = embeddings_size
UpperCAmelCase : str = hidden_sizes
UpperCAmelCase : str = depths
UpperCAmelCase : Optional[int] = is_training
UpperCAmelCase : int = use_labels
UpperCAmelCase : Optional[int] = hidden_act
UpperCAmelCase : List[Any] = num_labels
UpperCAmelCase : Union[str, Any] = scope
UpperCAmelCase : Any = len(lowercase_ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Any = None
if self.use_labels:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCAmelCase_ ( self : int , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase : List[Any] = TFResNetModel(config=lowercase_ )
UpperCAmelCase : int = model(lowercase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase_ ( self : Any , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[int] ) -> List[Any]:
UpperCAmelCase : List[Any] = self.num_labels
UpperCAmelCase : Union[str, Any] = TFResNetForImageClassification(lowercase_ )
UpperCAmelCase : Any = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = config_and_inputs
UpperCAmelCase : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class A_ ( _snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
UpperCAmelCase_ : Dict = (
{"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
if is_tf_available()
else {}
)
UpperCAmelCase_ : Tuple = False
UpperCAmelCase_ : Tuple = False
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : str = False
UpperCAmelCase_ : Optional[int] = False
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
UpperCAmelCase : Optional[int] = TFResNetModelTester(self )
UpperCAmelCase : Dict = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
def UpperCAmelCase_ ( self : str ) -> Any:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
pass
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any:
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[Any] = model_class(lowercase_ )
UpperCAmelCase : List[str] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : str = [*signature.parameters.keys()]
UpperCAmelCase : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
def check_hidden_states_output(lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] ):
UpperCAmelCase : Union[str, Any] = model_class(lowercase_ )
UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
UpperCAmelCase : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase : int = self.model_tester.num_stages
self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Tuple = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase : List[Any] = layer_type
UpperCAmelCase : int = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : List[Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Tuple:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : str = TFResNetModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def UpperCamelCase( ):
UpperCAmelCase : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class A_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self : Tuple ) -> Any:
UpperCAmelCase : Optional[int] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCAmelCase : Any = self.default_image_processor
UpperCAmelCase : List[Any] = prepare_img()
UpperCAmelCase : Dict = image_processor(images=lowercase_ , return_tensors='tf' )
# forward pass
UpperCAmelCase : List[Any] = model(**lowercase_ )
# verify the logits
UpperCAmelCase : Optional[Any] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
UpperCAmelCase : int = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowercase_ , atol=1E-4 ) )
| 151 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
lowercase__ = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
lowercase__ = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
lowercase__ = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    '''simple docstring'''
    def _info(self) -> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
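# The fields returned by _compute above map directly onto the quantities in
# Papineni et al. (2002). As a rough illustration only -- a minimal sketch, not
# the referenced nmt implementation -- the score is a brevity penalty times the
# geometric mean of clipped n-gram precisions (simplified here to use the
# shortest reference length):
import math
from collections import Counter
def toy_bleu(prediction, references, max_order=4):
    precisions = []
    for n in range(1, max_order + 1):
        pred_ngrams = Counter(tuple(prediction[i : i + n]) for i in range(len(prediction) - n + 1))
        max_ref_ngrams = Counter()
        for ref in references:
            max_ref_ngrams |= Counter(tuple(ref[i : i + n]) for i in range(len(ref) - n + 1))  # elementwise max
        overlap = sum((pred_ngrams & max_ref_ngrams).values())  # clipped n-gram matches
        precisions.append(overlap / max(sum(pred_ngrams.values()), 1))
    ref_len = min(len(ref) for ref in references)
    bp = 1.0 if len(prediction) > ref_len else math.exp(1 - ref_len / max(len(prediction), 1))
    if min(precisions) == 0:
        return 0.0
    return bp * math.exp(sum(math.log(p) for p in precisions) / max_order)
# toy_bleu(["hello", "there", "general", "kenobi"], [["hello", "there", "general", "kenobi"]]) -> 1.0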
| 151 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """simple docstring"""
    if rng is None:
        rng = global_rng
    values = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
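# A quick sanity demo of the helper above (illustrative only; the assertions
# hold for any seed because only the shape is checked):
_demo_sample = floats_list((2, 5), rng=random.Random(0))
assert len(_demo_sample) == 2 and len(_demo_sample[0]) == 5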
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, return_attention_mask=True, do_normalize=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))
        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor
    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    @require_torch
    def test_double_precision_pad(self):
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
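# For reference, the integration test above pins down the extractor's output
# contract. A minimal usage sketch (assuming a 16 kHz mono waveform; the silent
# input is an arbitrary example, not taken from the test data):
#
#     import numpy as np
#     from transformers import ASTFeatureExtractor
#
#     feature_extractor = ASTFeatureExtractor()  # defaults: 128 mel bins, 1024 frames
#     waveform = np.zeros(16000, dtype=np.float32)  # one second of silence
#     inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="np")
#     print(inputs.input_values.shape)  # (1, 1024, 128), matching the assertion above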
| 369 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/m2m100_418M''': 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file, spm_file, src_lang=None, tgt_lang=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", pad_token="<pad>", unk_token="<unk>", language_codes="m2m100", sp_model_kwargs: Optional[Dict[str, Any]] = None, num_madeup_words=8, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"""__{lang_code}__""" for lang_code in fairseq_language_code}
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, language_codes=language_codes, sp_model_kwargs=self.sp_model_kwargs, num_madeup_words=num_madeup_words, **kwargs, )
        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        self.encoder_size = len(self.encoder)
        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}
        self._src_lang = src_lang if src_lang is not None else '''en'''
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)
        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)
    @property
    def src_lang(self) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
def UpperCamelCase_ ( self : Any , A__ : str ) -> List[str]:
return self.sp_model.encode(A__ , out_type=A__ )
def UpperCamelCase_ ( self : Optional[int] , A__ : Dict ) -> str:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(A__ , self.encoder[self.unk_token] )
def UpperCamelCase_ ( self : Union[str, Any] , A__ : int ) -> str:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(A__ , self.unk_token )
def UpperCamelCase_ ( self : Optional[int] , A__ : Optional[int] ) -> List[Any]:
_snake_case = []
_snake_case = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A__ ) + token
_snake_case = []
else:
current_sub_tokens.append(A__ )
out_string += self.sp_model.decode(A__ )
return out_string.strip()
def UpperCamelCase_ ( self : str , A__ : List[int] , A__ : Optional[List[int]] = None , A__ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A__ , token_ids_a=A__ , already_has_special_tokens=A__ )
_snake_case = [1] * len(self.prefix_tokens )
_snake_case = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(A__ )) + suffix_ones
return prefix_ones + ([0] * len(A__ )) + ([0] * len(A__ )) + suffix_ones
def UpperCamelCase_ ( self : Tuple , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase_ ( self : str ) -> Dict:
_snake_case = {self.convert_ids_to_tokens(A__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ) -> Dict:
_snake_case = self.__dict__.copy()
_snake_case = None
return state
def __setstate__( self : Union[str, Any] , A__ : Dict ) -> None:
_snake_case = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_snake_case = {}
_snake_case = load_spm(self.spm_file , self.sp_model_kwargs )
def UpperCamelCase_ ( self : Any , A__ : str , A__ : Optional[str] = None ) -> Tuple[str]:
_snake_case = Path(A__ )
if not save_dir.is_dir():
raise OSError(f"""{save_directory} should be a directory""" )
_snake_case = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
_snake_case = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , A__ )
if os.path.abspath(self.spm_file ) != os.path.abspath(A__ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , A__ )
elif not os.path.isfile(self.spm_file ):
with open(A__ , '''wb''' ) as fi:
_snake_case = self.sp_model.serialized_model_proto()
fi.write(A__ )
return (str(A__ ), str(A__ ))
def UpperCamelCase_ ( self : Optional[int] , A__ : List[str] , A__ : str = "en" , A__ : Optional[List[str]] = None , A__ : str = "ro" , **A__ : List[Any] , ) -> BatchEncoding:
_snake_case = src_lang
_snake_case = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(A__ , A__ , **A__ )
def UpperCamelCase_ ( self : List[str] , A__ : int , A__ : Optional[str] , A__ : Optional[str] , **A__ : Union[str, Any] ) -> Tuple:
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
_snake_case = src_lang
_snake_case = self(A__ , add_special_tokens=A__ , **A__ )
_snake_case = self.get_lang_id(A__ )
_snake_case = tgt_lang_id
return inputs
def UpperCamelCase_ ( self : Dict ) -> Optional[Any]:
self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase_ ( self : Optional[Any] ) -> Dict:
self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]
    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    """simple docstring"""
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path: str) -> Union[Dict, List]:
    """simple docstring"""
    with open(path, "r") as f:
        return json.load(f)
def save_json(data, path: str) -> None:
    """simple docstring"""
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
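# End-to-end, the language-token bookkeeping above works as follows (a usage
# sketch; the checkpoint name comes from the URL maps at the top of this file):
#
#     from transformers import M2M100Tokenizer
#
#     tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#     encoded = tokenizer("Hello world", return_tensors="pt")
#     # input_ids start with the "__en__" language token and end with </s>,
#     # exactly as set_src_lang_special_tokens arranges via prefix/suffix tokens
#     print(tokenizer.convert_ids_to_tokens(encoded.input_ids[0]))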
| 278 | 0 |
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    """simple docstring"""
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    """simple docstring"""
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])
    set_param(
        torch_layer.self_attention.query_key, torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    """simple docstring"""
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])
    set_param(
        torch_layer.self_attention.query, torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.key, torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    """simple docstring"""
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm, torch.tensor(layer_norm_1_weight), torch.tensor(layer_norm_1_bias), )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm, torch.tensor(layer_norm_2_weight), torch.tensor(layer_norm_2_bias), )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense, torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(), torch.tensor(inter_dense_bias), )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense, torch.tensor(out_dense_weight).transpose(0, 1).contiguous(), torch.tensor(out_dense_bias), )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """simple docstring"""
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings), )
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"""{position_embeddings[emb_idx]} emb does not match"""
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm, torch.tensor(layer_norm_out_weight), torch.tensor(layer_norm_out_bias), )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder, torch.tensor(output_embed_weights).transpose(0, 1).contiguous(), torch.tensor(output_embed_bias), )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """simple docstring"""
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = ReformerModelWithLMHead(config)
    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
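# The recurring `.transpose(1, 2).contiguous().view(-1, hidden_size)` pattern above
# flattens trax's per-head weight layout into the single 2-D matrix that HF linear
# layers expect. A standalone sketch of just that reshape (all sizes are made-up
# illustrations, not values from any checkpoint):
#
#     import torch
#
#     num_heads, head_dim, hidden_size = 2, 4, 8
#     trax_weight = torch.randn(num_heads, hidden_size, head_dim)  # (heads, in, out-per-head)
#     hf_weight = trax_weight.transpose(1, 2).contiguous().view(-1, hidden_size)
#     print(hf_weight.shape)  # torch.Size([8, 8]) == (num_heads * head_dim, hidden_size)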
| 150 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["""input_features""", """is_longer"""]
    def __init__(self, feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10, fft_window_size=1_024, padding_value=0.0, return_attention_mask=False, frequency_min: float = 0, frequency_max: float = 14_000, top_db: int = None, truncation: str = "fusion", padding: str = "repeatpad", **kwargs):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs, )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale='''htk''', )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm='''slaney''', mel_scale='''slaney''', )
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output['''feature_extractor_type'''] = self.__class__.__name__
        if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform, window_function(self.fft_window_size, '''hann'''), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel='''dB''', )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode='''bilinear''', align_corners=False)
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(F"""data_truncating {truncation} not implemented""")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode='''constant''', constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        return input_mel, longer
    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], truncation: str = None, padding: Optional[str] = None, max_length: Optional[int] = None, sampling_rate: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
                    F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
                    F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""")
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''')
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(F"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)
        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True
        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {'''input_features''': input_mel, '''is_longer''': is_longer}
        input_features = BatchFeature(input_features)
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)
        return input_features
| 230 | 0 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")
DUMMY_CONSTANT = "\n{0} = None\n"
DUMMY_CLASS = "\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n"
DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)
        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")
        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")
        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):")
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):")
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")
        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n")
        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 41 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    '''simple docstring'''
    if "emb" in name:
        name = name.replace("emb" , "model.decoder.embed_tokens" )
    if "transformer" in name:
        name = name.replace("transformer" , "model.decoder" )
    if "cross_attention" in name:
        name = name.replace("cross_attention" , "encoder_attn" )
    if "linear1" in name:
        name = name.replace("linear1" , "fc1" )
    if "linear2" in name:
        name = name.replace("linear2" , "fc2" )
    if "norm1" in name:
        name = name.replace("norm1" , "self_attn_layer_norm" )
    if "norm_cross" in name:
        name = name.replace("norm_cross" , "encoder_attn_layer_norm" )
    if "norm2" in name:
        name = name.replace("norm2" , "final_layer_norm" )
    if "out_norm" in name:
        name = name.replace("out_norm" , "model.decoder.layer_norm" )
    if "linears" in name:
        name = name.replace("linears" , "lm_heads" )
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    '''simple docstring'''
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight" , "q_proj.weight" )] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight" , "k_proj.weight" )] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight" , "v_proj.weight" )] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj." ) :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    '''simple docstring'''
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(F'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size , ffn_dim=hidden_size * 4 , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
'''simple docstring'''
__magic_name__ : Dict = MusicGen.get_pretrained(_snake_case , device=_snake_case )
__magic_name__ : Any = decoder_config_from_checkpoint(_snake_case )
__magic_name__ : Any = fairseq_model.lm.state_dict()
__magic_name__ , __magic_name__ : Optional[Any] = rename_state_dict(
_snake_case , hidden_size=decoder_config.hidden_size )
__magic_name__ : str = TaEncoderModel.from_pretrained("t5-base" )
__magic_name__ : Any = EncodecModel.from_pretrained("facebook/encodec_32khz" )
__magic_name__ : int = MusicgenForCausalLM(_snake_case ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
__magic_name__ , __magic_name__ : List[str] = decoder.load_state_dict(_snake_case , strict=_snake_case )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(_snake_case )
if len(_snake_case ) > 0:
raise ValueError(F'''Missing key(s) in state_dict: {missing_keys}''' )
if len(_snake_case ) > 0:
raise ValueError(F'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
__magic_name__ : Optional[Any] = MusicgenForConditionalGeneration(text_encoder=_snake_case , audio_encoder=_snake_case , decoder=_snake_case )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(_snake_case )
# check we can do a forward pass
__magic_name__ : Optional[Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
__magic_name__ : List[Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
__magic_name__ : Dict = model(input_ids=_snake_case , decoder_input_ids=_snake_case ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
__magic_name__ : Optional[Any] = AutoTokenizer.from_pretrained("t5-base" )
__magic_name__ : List[str] = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
__magic_name__ : Union[str, Any] = MusicgenProcessor(feature_extractor=_snake_case , tokenizer=_snake_case )
# set the appropriate bos/pad token ids
__magic_name__ : List[str] = 2048
__magic_name__ : List[str] = 2048
# set other default generation config params
__magic_name__ : Union[str, Any] = int(30 * audio_encoder.config.frame_rate )
__magic_name__ : Optional[Any] = True
__magic_name__ : Dict = 3.0
if pytorch_dump_folder is not None:
Path(_snake_case ).mkdir(exist_ok=_snake_case )
logger.info(F'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(_snake_case )
processor.save_pretrained(_snake_case )
if repo_id:
logger.info(F'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(_snake_case )
processor.push_to_hub(_snake_case )
if __name__ == "__main__":
snake_case : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
snake_case : Optional[Any] = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
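# rename_state_dict above slices each fused attention matrix into equal thirds
# for q/k/v. A tiny torch sketch of that slicing (hidden size is an arbitrary
# example, not a MusicGen value):
#
#     import torch
#
#     hidden_size = 4
#     in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
#     q_proj = in_proj_weight[:hidden_size, :]
#     k_proj = in_proj_weight[hidden_size : 2 * hidden_size, :]
#     v_proj = in_proj_weight[-hidden_size:, :]
#     assert q_proj.shape == k_proj.shape == v_proj.shape == (hidden_size, hidden_size)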
| 41 | 1 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""")
    datasets = load_dataset("""glue""" , """mrpc""")
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None)
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
        tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )
# Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size , drop_last=True)
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE , drop_last=(accelerator.mixed_precision == """fp8""") , )
    return train_dataloader, eval_dataloader
def training_function(config, args):
'''simple docstring'''
a_ : Optional[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a_ : List[Any] = config["""lr"""]
a_ : Dict = int(config["""num_epochs"""])
a_ : List[Any] = int(config["""seed"""])
a_ : Tuple = int(config["""batch_size"""])
a_ : Tuple = evaluate.load("""glue""" , """mrpc""")
# If the batch size is too big we use gradient accumulation
a_ : Optional[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
a_ : str = batch_size // MAX_GPU_BATCH_SIZE
a_ : str = MAX_GPU_BATCH_SIZE
set_seed(a__)
a_ , a_ : Optional[int] = get_dataloaders(a__ , a__)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a_ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=a__)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
a_ : Dict = model.to(accelerator.device)
# Instantiate optimizer
a_ : List[str] = AdamW(params=model.parameters() , lr=a__)
# Instantiate scheduler
a_ : Dict = get_linear_schedule_with_warmup(
optimizer=a__ , num_warmup_steps=1_0_0 , num_training_steps=(len(a__) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a_ , a_ , a_ , a_ , a_ : Dict = accelerator.prepare(
a__ , a__ , a__ , a__ , a__)
# Now we train the model
for epoch in range(a__):
model.train()
for step, batch in enumerate(a__):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
a_ : Union[str, Any] = model(**a__)
a_ : Optional[Any] = outputs.loss
a_ : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(a__)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(a__):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
a_ : Optional[int] = model(**a__)
a_ : Union[str, Any] = outputs.logits.argmax(dim=-1)
a_ , a_ : Optional[Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
metric.add_batch(
predictions=a__ , references=a__ , )
a_ : Optional[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , a__)
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="""Simple example of training script.""")
    parser.add_argument(
        """--mixed_precision""" , type=str , default=None , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""" , )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""")
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config , args)
if __name__ == "__main__":
main()
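# The gradient-accumulation branch in training_function keeps the effective batch
# size constant: a requested batch of 64 with MAX_GPU_BATCH_SIZE = 16 becomes 4
# accumulation steps of 16. The same arithmetic in isolation (a minimal sketch):
#
#     def accumulation_plan(batch_size, max_gpu_batch_size=16):
#         if batch_size > max_gpu_batch_size:
#             return max_gpu_batch_size, batch_size // max_gpu_batch_size
#         return batch_size, 1
#
#     print(accumulation_plan(64))  # (16, 4): still 64 samples per optimizer step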
| 248 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    '''simple docstring'''
    if rng is None:
        rng = global_rng
    values = []
for batch_idx in range(shape[0]):
values.append([])
for _ in range(shape[1]):
values[-1].append(rng.random() * scale)
return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    """simple docstring"""
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2_000, feature_size=10, hop_length=160, chunk_length=8, padding_value=0.0, sampling_rate=4_000, return_attention_mask=False, do_normalize=True) -> Dict:
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self) -> Dict:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    """simple docstring"""
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
def UpperCamelCase__ ( self ) -> Tuple:
a_ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
a_ : Optional[int] = feat_extract_first.save_pretrained(_lowercase )[0]
check_json_file_has_correct_format(_lowercase )
a_ : Optional[Any] = self.feature_extraction_class.from_pretrained(_lowercase )
a_ : Optional[int] = feat_extract_first.to_dict()
a_ : Union[str, Any] = feat_extract_second.to_dict()
a_ : List[str] = feat_extract_first.mel_filters
a_ : Optional[int] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_lowercase , _lowercase ) )
self.assertEqual(_lowercase , _lowercase )
def UpperCamelCase__ ( self ) -> Optional[int]:
a_ : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
a_ : int = os.path.join(_lowercase , """feat_extract.json""" )
feat_extract_first.to_json_file(_lowercase )
a_ : str = self.feature_extraction_class.from_json_file(_lowercase )
a_ : Dict = feat_extract_first.to_dict()
a_ : Tuple = feat_extract_second.to_dict()
a_ : Dict = feat_extract_first.mel_filters
a_ : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_lowercase , _lowercase ) )
self.assertEqual(_lowercase , _lowercase )
def UpperCamelCase__ ( self ) -> List[str]:
# Tests that all call wrap to encode_plus and batch_encode_plus
a_ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a_ : int = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
a_ : List[str] = [np.asarray(_lowercase ) for speech_input in speech_inputs]
# Test feature size
a_ : str = feature_extractor(_lowercase , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
a_ : Any = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
a_ : Any = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(_lowercase , _lowercase , atol=1e-3 ) )
# Test batched
a_ : str = feature_extractor(_lowercase , return_tensors="""np""" ).input_features
a_ : str = feature_extractor(_lowercase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_lowercase , _lowercase ):
self.assertTrue(np.allclose(_lowercase , _lowercase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
a_ : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)]
a_ : List[str] = np.asarray(_lowercase )
a_ : Optional[Any] = feature_extractor(_lowercase , return_tensors="""np""" ).input_features
a_ : Dict = feature_extractor(_lowercase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_lowercase , _lowercase ):
self.assertTrue(np.allclose(_lowercase , _lowercase , atol=1e-3 ) )
# Test truncation required
a_ : Optional[int] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
a_ : str = [np.asarray(_lowercase ) for speech_input in speech_inputs]
a_ : List[Any] = [x[: feature_extractor.n_samples] for x in speech_inputs]
a_ : Any = [np.asarray(_lowercase ) for speech_input in speech_inputs_truncated]
a_ : Union[str, Any] = feature_extractor(_lowercase , return_tensors="""np""" ).input_features
a_ : str = feature_extractor(_lowercase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_lowercase , _lowercase ):
self.assertTrue(np.allclose(_lowercase , _lowercase , atol=1e-3 ) )
    def test_double_precision_pad(self):
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"""input_features""": inputs}], return_tensors="""np""")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"""input_features""": inputs}], return_tensors="""pt""")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples( self , num_samples ):
        ds = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("""id""" ).select(range(num_samples ) )[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]
def UpperCamelCase__ ( self ) -> int:
# fmt: off
a_ : Union[str, Any] = torch.tensor(
[
0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
-0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
] )
# fmt: on
a_ : List[str] = self._load_datasamples(1 )
a_ : List[Any] = WhisperFeatureExtractor()
a_ : Union[str, Any] = feature_extractor(_lowercase , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3_000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , _lowercase , atol=1e-4 ) )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
a_ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a_ : Any = self._load_datasamples(1 )[0]
a_ : List[str] = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue
a_ : Dict = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=_lowercase )[0]
self.assertTrue(np.all(np.mean(_lowercase ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_lowercase ) - 1 ) < 1e-3 ) )
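# --- Hedged example (not part of the original test file) ---
# A minimal sketch of the zero-mean unit-variance normalization the test above
# exercises, assuming no attention mask; the helper name is illustrative.
import numpy as np
def _zero_mean_unit_var(values, eps=1e-7):
    # shift to mean 0, then scale to (approximately) unit variance
    return (values - values.mean()) / np.sqrt(values.var() + eps)
_audio = np.random.rand(16_000).astype(np.float32) * 65_535  # badly scaled input
_normed = _zero_mean_unit_var(_audio)
assert abs(_normed.mean()) < 1e-3 and abs(_normed.var() - 1) < 1e-3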
| 248 | 1 |
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self , task_performed , total ):
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1 )] for j in range(2 ** len(task_performed ) )
        ]
        self.task = defaultdict(list )  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed )) - 1
    def count_ways_until(self , mask , task_no ):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we don't take this task in the arrangement
        total_ways_util = self.count_ways_until(mask , task_no + 1 )
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]
    def count_no_of_ways(self , task_performed ):
        # Store the list of persons for each task
        for i in range(len(task_performed ) ):
            for j in task_performed[i]:
                self.task[j].append(i )
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
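# --- Hedged check (not part of the original script) ---
# Brute-force verification of the bitmask solver above on the sample instance:
# enumerate every injective person->task assignment directly. Both approaches
# count 10 ways for the data used in __main__.
from itertools import permutations
def _brute_force_count(total, task_performed):
    tasks = range(1, total + 1)
    return sum(
        all(t in task_performed[p] for p, t in enumerate(choice))
        for choice in permutations(tasks, len(task_performed))
    )
assert _brute_force_count(5, [[1, 3, 4], [1, 2, 5], [3, 4]]) == 10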
| 137 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]] ) -> list[list[float]]:
    """simple docstring"""
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
        if determinant == 0:
            raise ValueError("This matrix has no inverse." )
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix ) == 3
        and len(matrix[0] ) == 3
        and len(matrix[1] ) == 3
        and len(matrix[2] ) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
                + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
                + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
            )
            - (
                (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
                + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
                + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
            ) )
        if determinant == 0:
            raise ValueError("This matrix has no inverse." )
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
            d(matrix[1][2] ) * d(matrix[2][1] )
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
            d(matrix[1][1] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
            d(matrix[0][2] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
            d(matrix[0][2] ) * d(matrix[1][1] )
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
            d(matrix[0][1] ) * d(matrix[1][0] )
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix )
        for i in range(3 ):
            for j in range(3 ):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix )
        for i in range(3 ):
            for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )
        # Calculate the inverse of the matrix
        return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3." )
| 137 | 1 |
def solution(n: int = 2_000_000 ) -> int:
    primality_list = [0 for i in range(n + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2 , int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i , n + 1 , i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(F"""{solution() = }""")
| 195 |
import pprint
import requests
API_ENDPOINT_URL = '''https://zenquotes.io/api'''
def quote_of_the_day():
    return requests.get(API_ENDPOINT_URL + '/today' ).json()
def random_quotes():
    return requests.get(API_ENDPOINT_URL + '/random' ).json()
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
| 195 | 1 |
'''simple docstring'''
def solution(length: int = 50 ) -> int:
    """simple docstring"""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 114 |
'''simple docstring'''
def has_unique_chars(input_str: str ) -> bool:
    """simple docstring"""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch )
        ch_bit_index_on = pow(2 , ch_unicode )
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
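# --- Hedged check (not part of the original module) ---
# Two quick examples of the bit-vector test above.
assert has_unique_chars("abcdef") is True
assert has_unique_chars("abca") is False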
| 114 | 1 |
'''simple docstring'''
import argparse
import hashlib
import os
import urllib.request
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"""tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
"""tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
"""base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
"""base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
"""small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
"""small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
"""medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
"""medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
"""large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
"""large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def remove_ignore_keys_(state_dict ):
    """simple docstring"""
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k , None )
WHISPER_MAPPING = {
"""blocks""": """layers""",
"""mlp.0""": """fc1""",
"""mlp.2""": """fc2""",
"""mlp_ln""": """final_layer_norm""",
""".attn.query""": """.self_attn.q_proj""",
""".attn.key""": """.self_attn.k_proj""",
""".attn.value""": """.self_attn.v_proj""",
""".attn_ln""": """.self_attn_layer_norm""",
""".attn.out""": """.self_attn.out_proj""",
""".cross_attn.query""": """.encoder_attn.q_proj""",
""".cross_attn.key""": """.encoder_attn.k_proj""",
""".cross_attn.value""": """.encoder_attn.v_proj""",
""".cross_attn_ln""": """.encoder_attn_layer_norm""",
""".cross_attn.out""": """.encoder_attn.out_proj""",
"""decoder.ln.""": """decoder.layer_norm.""",
"""encoder.ln.""": """encoder.layer_norm.""",
"""token_embedding""": """embed_tokens""",
"""encoder.positional_embedding""": """encoder.embed_positions.weight""",
"""decoder.positional_embedding""": """decoder.embed_positions.weight""",
"""ln_post""": """layer_norm""",
}
def rename_keys(s_dict ):
    """simple docstring"""
    keys = list(s_dict.keys() )
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k , v )
        print(F"""{key} -> {new_key}""" )
        s_dict[new_key] = s_dict.pop(key )
    return s_dict
def make_linear_from_emb(emb ):
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str , root: str ) -> bytes:
    """simple docstring"""
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    expected_sha256 = url.split("/" )[-2]
    download_target = os.path.join(root , filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(F"""{download_target} exists and is not a regular file""" )
    if os.path.isfile(download_target ):
        model_bytes = open(download_target , "rb" ).read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(F"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
    with urllib.request.urlopen(url ) as source, open(download_target , "wb" ) as output:
        with tqdm(
            total=int(source.info().get("Content-Length" ) ) , ncols=80 , unit="iB" , unit_scale=True , unit_divisor=1_024 ) as loop:
            while True:
                buffer = source.read(8_192 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    model_bytes = open(download_target , "rb" ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model." )
    return model_bytes
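# --- Hedged example (not part of the original script) ---
# A self-contained sketch of the checksum step `_download` performs: verify a
# payload against an expected SHA-256 digest before trusting a cached file.
# The payload and digest below are placeholders, not real checkpoint values.
def _matches_sha256(payload: bytes, expected_hex: str) -> bool:
    return hashlib.sha256(payload).hexdigest() == expected_hex
_payload = b"example payload"
assert _matches_sha256(_payload, hashlib.sha256(_payload).hexdigest())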
def convert_openai_whisper_to_tfms(checkpoint_path , pytorch_dump_folder_path ):
    """simple docstring"""
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path] )
    else:
        original_checkpoint = torch.load(checkpoint_path , map_location="cpu" )
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    # note: the heads count comes from "n_text_head"; "n_text_state" is the hidden size
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"] , encoder_ffn_dim=ffn_dim , decoder_ffn_dim=ffn_dim , num_mel_bins=dimensions["n_mels"] , d_model=dimensions["n_audio_state"] , max_target_positions=dimensions["n_text_ctx"] , encoder_layers=dimensions["n_audio_layer"] , encoder_attention_heads=dimensions["n_audio_head"] , decoder_layers=dimensions["n_text_layer"] , decoder_attention_heads=dimensions["n_text_head"] , max_source_positions=dimensions["n_audio_ctx"] , )
    model = WhisperForConditionalGeneration(config )
    missing, unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            F""" but all the following weights are missing {missing}""" )
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("""--checkpoint_path""", type=str, help="""Path to the downloaded checkpoints""")
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 31 |
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
def __init__( self : Any , A : Optional[int]=None , A : Tuple=None , *A : Tuple , **A : List[str] ):
super().__init__(*A , **A )
if config is None:
assert isinstance(self.model , A ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F""" {self.model.__class__}"""
)
_UpperCAmelCase : str = self.model.config
else:
_UpperCAmelCase : List[str] = config
_UpperCAmelCase : List[Any] = data_args
_UpperCAmelCase : str = self.config.tgt_vocab_size if isinstance(self.config , A ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
" padding.." )
if self.args.label_smoothing == 0:
_UpperCAmelCase : Optional[Any] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_UpperCAmelCase : Dict = label_smoothed_nll_loss
def _A ( self : Tuple , A : int ):
if self.optimizer is None:
_UpperCAmelCase : Tuple = ["bias", "LayerNorm.weight"]
_UpperCAmelCase : str = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
_UpperCAmelCase : int = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_UpperCAmelCase : List[str] = Adafactor
_UpperCAmelCase : List[Any] = {"scale_parameter": False, "relative_step": False}
else:
_UpperCAmelCase : List[str] = AdamW
_UpperCAmelCase : List[str] = {
"betas": (self.args.adam_betaa, self.args.adam_betaa),
"eps": self.args.adam_epsilon,
}
_UpperCAmelCase : List[Any] = self.args.learning_rate
if self.sharded_ddp:
_UpperCAmelCase : List[Any] = OSS(
params=A , optim=A , **A , )
else:
_UpperCAmelCase : Union[str, Any] = optimizer_cls(A , **A )
if self.lr_scheduler is None:
_UpperCAmelCase : List[str] = self._get_lr_scheduler(A )
else: # ignoring --lr_scheduler
logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." )
def _A ( self : List[str] , A : Optional[int] ):
_UpperCAmelCase : List[str] = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_UpperCAmelCase : Optional[Any] = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_UpperCAmelCase : str = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
_UpperCAmelCase : str = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=A )
return scheduler
def _A ( self : Tuple ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def _A ( self : Any , A : Union[str, Any] , A : Union[str, Any] , A : List[Any] ):
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_UpperCAmelCase : List[str] = model(**A , use_cache=A )[0]
_UpperCAmelCase : int = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
_UpperCAmelCase , _UpperCAmelCase : Any = model(**A , labels=A , use_cache=A )[:2]
else:
# compute label smoothed loss
_UpperCAmelCase : Optional[int] = model(**A , use_cache=A )[0]
_UpperCAmelCase : List[str] = torch.nn.functional.log_softmax(A , dim=-1 )
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.loss_fn(A , A , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def _A ( self : List[str] , A : Optional[int] , A : Optional[int] ):
_UpperCAmelCase : Union[str, Any] = inputs.pop("labels" )
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self._compute_loss(A , A , A )
return loss
def _A ( self : List[str] , A : nn.Module , A : Dict[str, Union[torch.Tensor, Any]] , A : bool , A : Optional[List[str]] = None , ):
_UpperCAmelCase : List[str] = self._prepare_inputs(A )
_UpperCAmelCase : Dict = {
"max_length": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_UpperCAmelCase : Dict = self.model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , **A , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase : int = self._pad_tensors_to_max_len(A , gen_kwargs["max_length"] )
_UpperCAmelCase : Any = inputs.pop("labels" )
with torch.no_grad():
# compute loss on predict data
_UpperCAmelCase , _UpperCAmelCase : str = self._compute_loss(A , A , A )
_UpperCAmelCase : List[str] = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_UpperCAmelCase : str = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase : Optional[Any] = self._pad_tensors_to_max_len(A , gen_kwargs["max_length"] )
return (loss, logits, labels)
def _A ( self : Dict , A : int , A : List[str] ):
# If PAD token is not defined at least EOS token has to be defined
_UpperCAmelCase : Union[str, Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
F""" padded to `max_length`={max_length}""" )
_UpperCAmelCase : Tuple = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_UpperCAmelCase : Tuple = tensor
return padded_tensor
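# --- Hedged example (not part of the original trainer) ---
# A self-contained sketch of the padding step in `_pad_tensors_to_max_len`
# above: fill a (batch, max_length) tensor with the pad id, then copy the
# shorter tensor into its left slice.
def _pad_to_max_len_demo(tensor, max_length, pad_token_id):
    padded = pad_token_id * torch.ones(
        (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
    )
    padded[:, : tensor.shape[-1]] = tensor
    return padded
assert _pad_to_max_len_demo(torch.tensor([[5, 6, 7]]), 5, 0).tolist() == [[5, 6, 7, 0, 0]]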
| 31 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool( PipelineTool ):
"""simple docstring"""
lowerCamelCase :Optional[int] = '''openai/whisper-base'''
lowerCamelCase :Optional[Any] = (
'''This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '''
'''transcribed text.'''
)
lowerCamelCase :str = '''transcriber'''
lowerCamelCase :int = WhisperProcessor
lowerCamelCase :int = WhisperForConditionalGeneration
lowerCamelCase :str = ['''audio''']
lowerCamelCase :str = ['''text''']
    def encode( self , lowerCAmelCase_ ):
return self.pre_processor(lowerCAmelCase_ , return_tensors="""pt""" ).input_features
    def forward( self , lowerCAmelCase_ ):
return self.model.generate(inputs=lowerCAmelCase_ )
    def decode( self , lowerCAmelCase_ ):
return self.pre_processor.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )[0]
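# --- Hedged usage sketch (not part of the original tool) ---
# What the three hooks above do end to end, written against the underlying
# Whisper classes directly. Commented out because it downloads a checkpoint;
# `raw_audio` stands for a 1-D float array of 16 kHz audio.
# processor = WhisperProcessor.from_pretrained("openai/whisper-base")
# model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-base")
# features = processor(raw_audio, sampling_rate=16_000, return_tensors="pt").input_features
# token_ids = model.generate(inputs=features)
# text = processor.batch_decode(token_ids, skip_special_tokens=True)[0]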
| 359 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ = "cpu" , lowerCAmelCase_ = "openai/clip-vit-large-patch14" ) -> None:
_A = device
_A = CLIPTokenizerFast.from_pretrained(lowerCAmelCase_ )
_A = [0.4814_5466, 0.457_8275, 0.4082_1073]
_A = [0.2686_2954, 0.2613_0258, 0.2757_7711]
_A = torchvision.transforms.Normalize(self.image_mean , self.image_std )
_A = torchvision.transforms.Resize(2_24 )
_A = torchvision.transforms.CenterCrop(2_24 )
    def preprocess_img( self , lowerCAmelCase_ ):
_A = self.resize(lowerCAmelCase_ )
_A = self.center_crop(lowerCAmelCase_ )
_A = self.normalize(lowerCAmelCase_ )
return images
def __call__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Tuple:
_A = self.tokenizer(text=lowerCAmelCase_ , **lowerCAmelCase_ )
_A = self.preprocess_img(lowerCAmelCase_ )
_A = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class a ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_=10 , lowerCAmelCase_=0.01 , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_="image" , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> None:
super().__init__()
_A = None
_A = device if device else get_device()
if vqgan:
_A = vqgan
else:
_A = load_vqgan(self.device , conf_path=lowerCAmelCase_ , ckpt_path=lowerCAmelCase_ )
self.vqgan.eval()
if clip:
_A = clip
else:
_A = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
_A = ProcessorGradientFlow(device=self.device )
_A = iterations
_A = lr
_A = log
_A = make_grid
_A = return_val
_A = quantize
_A = self.vqgan.decoder.z_shape
def UpperCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=5 , lowerCAmelCase_=True ) -> Any:
_A = []
if output_path is None:
_A = """./animation.gif"""
if input_path is None:
_A = self.save_path
_A = sorted(glob(input_path + """/*""" ) )
if not len(lowerCAmelCase_ ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(lowerCAmelCase_ ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
_A = total_duration / len(lowerCAmelCase_ )
_A = [frame_duration] * len(lowerCAmelCase_ )
if extend_frames:
_A = 1.5
_A = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(lowerCAmelCase_ ) )
imageio.mimsave(lowerCAmelCase_ , lowerCAmelCase_ , duration=lowerCAmelCase_ )
print(F'''gif saved to {output_path}''' )
    def _get_latent( self , lowerCAmelCase_=None , lowerCAmelCase_=None ):
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
_A = preprocess(Image.open(lowerCAmelCase_ ) , target_image_size=2_56 ).to(self.device )
_A = preprocess_vqgan(lowerCAmelCase_ )
_A , *_A = self.vqgan.encode(lowerCAmelCase_ )
return z
    def _add_vector( self , lowerCAmelCase_ ):
_A = self.latent.detach().requires_grad_()
_A = base_latent + transform_vector
if self.quantize:
_A , *_A = self.vqgan.quantize(lowerCAmelCase_ )
else:
_A = trans_latent
return self.vqgan.decode(lowerCAmelCase_ )
    def _get_clip_similarity( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None ):
_A = self.clip_preprocessor(text=lowerCAmelCase_ , images=lowerCAmelCase_ , return_tensors="""pt""" , padding=lowerCAmelCase_ )
_A = self.clip(**lowerCAmelCase_ )
_A = clip_outputs.logits_per_image
if weights is not None:
_A = similarity_logits * weights
return similarity_logits.sum()
    def _get_CLIP_loss( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_A = self._get_clip_similarity(pos_prompts["""prompts"""] , lowerCAmelCase_ , weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
_A = self._get_clip_similarity(neg_prompts["""prompts"""] , lowerCAmelCase_ , weights=neg_prompts["""weights"""] )
else:
_A = torch.tensor([1] , device=self.device )
_A = -torch.log(lowerCAmelCase_ ) + torch.log(lowerCAmelCase_ )
return loss
    def _optimize_CLIP( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_A = torch.randn_like(self.latent , requires_grad=lowerCAmelCase_ , device=self.device )
_A = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_A = self._add_vector(lowerCAmelCase_ )
_A = loop_post_process(lowerCAmelCase_ )
_A = self._get_CLIP_loss(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
print("""CLIP loss""" , lowerCAmelCase_ )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=lowerCAmelCase_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
    def _init_logging( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
wandb.init(reinit=lowerCAmelCase_ , project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
_A = Image.open(lowerCAmelCase_ )
_A = image.resize((2_56, 2_56) )
wandb.log("""Original Image""" , wandb.Image(lowerCAmelCase_ ) )
    def process_prompts( self , lowerCAmelCase_ ):
if not prompts:
return []
_A = []
_A = []
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(lowerCAmelCase_ , (tuple, list) ):
_A = prompt[0]
_A = float(prompt[1] )
elif ":" in prompt:
_A , _A = prompt.split(""":""" )
_A = float(lowerCAmelCase_ )
else:
_A = prompt
_A = 1.0
processed_prompts.append(lowerCAmelCase_ )
weights.append(lowerCAmelCase_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(lowerCAmelCase_ , device=self.device ),
}
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=None , ) -> str:
if image_path:
_A = self._get_latent(lowerCAmelCase_ )
else:
_A = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
assert pos_prompts, "You must provide at least one positive prompt."
_A = self.process_prompts(lowerCAmelCase_ )
_A = self.process_prompts(lowerCAmelCase_ )
if save_final and save_path is None:
_A = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(lowerCAmelCase_ ):
os.makedirs(lowerCAmelCase_ )
else:
_A = save_path + """_""" + get_timestamp()
os.makedirs(lowerCAmelCase_ )
_A = save_path
_A = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(lowerCAmelCase_ ) )
_A = loop_post_process(lowerCAmelCase_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ):
if show_intermediate:
show_pil(lowerCAmelCase_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}.png''' ) )
if self.log:
wandb.log({"""Image""": wandb.Image(lowerCAmelCase_ )} )
if show_final:
show_pil(lowerCAmelCase_ )
if save_final:
transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}_final.png''' ) )
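# --- Hedged example (not part of the original script) ---
# A standalone sketch of the "text:weight" prompt convention parsed by
# `process_prompts` above; rsplit is a small robustness tweak so prompts
# containing colons keep everything before the last one.
def _parse_prompt_spec(spec):
    prompts, weights = [], []
    for entry in (p.strip() for p in spec.split("|")):
        if ":" in entry:
            text, raw_weight = entry.rsplit(":", 1)
            prompts.append(text)
            weights.append(float(raw_weight))
        else:
            prompts.append(entry)
            weights.append(1.0)
    return prompts, weights
assert _parse_prompt_spec("a cat:2|a dog") == (["a cat", "a dog"], [2.0, 1.0])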
| 81 | 0 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    """simple docstring"""
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args )
try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
__a = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
__a = """ """.join(str(_SCREAMING_SNAKE_CASE ).split(""" """ )[:-1] )
__a = """"""
__a = eval(str(_SCREAMING_SNAKE_CASE ).split(""" """ )[-1] )
__a = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
            raise ValueError(full_error_msg )
benchmark.run()
if __name__ == "__main__":
main()
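# --- Hedged example (not part of the original script) ---
# A toy version of the flag translation the error path above describes:
# rewrite deprecated "--no_<flag>" spellings to the newer "--no-<flag>" form.
def _modernize_flags(argv):
    return [("--no-" + a[5:]) if a.startswith("--no_") else a for a in argv]
assert _modernize_flags(["--no_cuda", "--batch_size"]) == ["--no-cuda", "--batch_size"]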
| 302 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
class a__ ( __A ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = ['pixel_values']
def __init__(self , __lowercase = True , __lowercase = None , __lowercase = None , __lowercase = PILImageResampling.BILINEAR , __lowercase = True , __lowercase = 1 / 2_55 , __lowercase = True , __lowercase = None , __lowercase = None , **__lowercase , ):
super().__init__(**__lowercase )
__lowerCAmelCase = size if size is not None else {'''shortest_edge''': 3_84}
__lowerCAmelCase = get_size_dict(__lowercase , default_to_square=__lowercase )
__lowerCAmelCase = do_resize
__lowerCAmelCase = size
# Default value set here for backwards compatibility where the value in config is None
__lowerCAmelCase = crop_pct if crop_pct is not None else 2_24 / 2_56
__lowerCAmelCase = resample
__lowerCAmelCase = do_rescale
__lowerCAmelCase = rescale_factor
__lowerCAmelCase = do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowerCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase = PILImageResampling.BICUBIC , __lowercase = None , **__lowercase , ):
__lowerCAmelCase = get_size_dict(__lowercase , default_to_square=__lowercase )
if "shortest_edge" not in size:
raise ValueError(F"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
__lowerCAmelCase = size['''shortest_edge''']
if shortest_edge < 3_84:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
__lowerCAmelCase = int(shortest_edge / crop_pct )
__lowerCAmelCase = get_resize_output_image_size(__lowercase , size=__lowercase , default_to_square=__lowercase )
__lowerCAmelCase = resize(image=__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__lowercase , size=(shortest_edge, shortest_edge) , data_format=__lowercase , **__lowercase )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__lowercase , size=(shortest_edge, shortest_edge) , resample=__lowercase , data_format=__lowercase , **__lowercase )
def _snake_case (self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ):
return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )
def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase , ):
return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase )
def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ):
__lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
__lowerCAmelCase = crop_pct if crop_pct is not None else self.crop_pct
__lowerCAmelCase = resample if resample is not None else self.resample
__lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
__lowerCAmelCase = image_std if image_std is not None else self.image_std
__lowerCAmelCase = size if size is not None else self.size
__lowerCAmelCase = get_size_dict(__lowercase , default_to_square=__lowercase )
__lowerCAmelCase = make_list_of_images(__lowercase )
if not valid_images(__lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_resize and size["shortest_edge"] < 3_84 and crop_pct is None:
raise ValueError('''crop_pct must be specified if size < 384.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__lowerCAmelCase = [to_numpy_array(__lowercase ) for image in images]
if do_resize:
__lowerCAmelCase = [self.resize(image=__lowercase , size=__lowercase , crop_pct=__lowercase , resample=__lowercase ) for image in images]
if do_rescale:
__lowerCAmelCase = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images]
if do_normalize:
__lowerCAmelCase = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images]
__lowerCAmelCase = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
__lowerCAmelCase = {'''pixel_values''': images}
return BatchFeature(data=__lowercase , tensor_type=__lowercase )
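# --- Hedged example (not part of the original processor) ---
# The crop_pct arithmetic used in `resize` above: for shortest_edge < 384 the
# image is first resized so its short side is shortest_edge / crop_pct, then
# center-cropped back down to shortest_edge.
_shortest_edge = 224
_crop_pct = 224 / 256
assert int(_shortest_edge / _crop_pct) == 256  # resize short side to 256, then crop 224x224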
| 174 | 0 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
A : str = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
    def get_dummy_inputs( self , SCREAMING_SNAKE_CASE__=0 ):
lowercase : str = floats_tensor((1, 3, 128, 128) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) )
lowercase : Union[str, Any] = np.random.RandomState(SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''strength''': 0.75,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def __lowerCamelCase ( self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = self.get_dummy_inputs()
lowercase : Dict = pipe(**SCREAMING_SNAKE_CASE__ ).images
lowercase : Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
lowercase : str = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def __lowerCamelCase ( self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowercase : int = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowercase : str = self.get_dummy_inputs()
lowercase : Tuple = pipe(**SCREAMING_SNAKE_CASE__ ).images
lowercase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowercase : Any = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __lowerCamelCase ( self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowercase : Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
# warmup pass to apply optimizations
lowercase : Any = pipe(**self.get_dummy_inputs() )
lowercase : List[Any] = self.get_dummy_inputs()
lowercase : Tuple = pipe(**SCREAMING_SNAKE_CASE__ ).images
lowercase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowercase : Any = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __lowerCamelCase ( self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowercase : int = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowercase : Any = self.get_dummy_inputs()
lowercase : Optional[Any] = pipe(**SCREAMING_SNAKE_CASE__ ).images
lowercase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowercase : List[Any] = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __lowerCamelCase ( self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowercase : Tuple = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowercase : Dict = self.get_dummy_inputs()
lowercase : List[str] = pipe(**SCREAMING_SNAKE_CASE__ ).images
lowercase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowercase : Dict = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __lowerCamelCase ( self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowercase : Tuple = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = self.get_dummy_inputs()
lowercase : List[Any] = pipe(**SCREAMING_SNAKE_CASE__ ).images
lowercase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowercase : List[Any] = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@property
def __lowerCamelCase ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __lowerCamelCase ( self ):
lowercase : Any = ort.SessionOptions()
lowercase : Any = False
return options
def __lowerCamelCase ( self ):
lowercase : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowercase : Any = init_image.resize((768, 512) )
# using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowercase : Dict = '''A fantasy landscape, trending on artstation'''
lowercase : List[str] = np.random.RandomState(0 )
lowercase : Optional[int] = pipe(
prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=SCREAMING_SNAKE_CASE__ , output_type='''np''' , )
lowercase : Optional[int] = output.images
lowercase : Optional[Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
lowercase : Tuple = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def __lowerCamelCase ( self ):
lowercase : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowercase : List[Any] = init_image.resize((768, 512) )
lowercase : Optional[int] = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=SCREAMING_SNAKE_CASE__ , safety_checker=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowercase : Any = '''A fantasy landscape, trending on artstation'''
lowercase : List[Any] = np.random.RandomState(0 )
lowercase : int = pipe(
prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=SCREAMING_SNAKE_CASE__ , output_type='''np''' , )
lowercase : str = output.images
lowercase : Optional[Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
lowercase : Tuple = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
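# --- Hedged example (not part of the original tests) ---
# The scheduler-swap pattern the tests above repeat: any diffusers scheduler
# can be rebuilt from another scheduler's config. Shown with two schedulers
# that need no model weights.
from diffusers import DDIMScheduler, DDPMScheduler
_base = DDPMScheduler(num_train_timesteps=1_000)
_swapped = DDIMScheduler.from_config(_base.config)
assert _swapped.config.num_train_timesteps == 1_000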
| 370 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
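# --- Hedged example (not part of the original __init__) ---
# A minimal sketch of the soft-dependency probing that backs the guards above:
# check whether a package is importable without actually importing it.
import importlib.util
def _soft_available(name):
    return importlib.util.find_spec(name) is not None
assert _soft_available("os") is True
assert _soft_available("definitely_not_a_real_package") is False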
| 173 | 0 |
'''simple docstring'''
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    'vocab_size': len(tokenizer),
    'scale_attn_by_inverse_layer_idx': True,
    'reorder_and_upcast_attn': True,
}
# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 163 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name ):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024
    # set label information
    num_labels = 150
    repo_id = """huggingface/label-files"""
    filename = """ade20k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths , hidden_sizes=hidden_sizes , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , id2label=id2label , label2id=label2id , )
    return config
def create_rename_keys(config ):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") )
rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def convert_upernet_checkpoint(model_name , pytorch_dump_folder_path , push_to_hub ):
    model_name_to_url = {
        """upernet-convnext-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth""",
        """upernet-convnext-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth""",
        """upernet-convnext-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth""",
        """upernet-convnext-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth""",
        """upernet-convnext-xlarge""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth""",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )["""state_dict"""]
    config = get_upernet_config(model_name )
    model = UperNetForSemanticSegmentation(config )
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace("""bn""" , """batch_norm""" )
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    model.load_state_dict(state_dict )
    # verify on image
    url = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
    image = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    processor = SegformerImageProcessor()
    pixel_values = processor(image , return_tensors="""pt""" ).pixel_values
    with torch.no_grad():
        outputs = model(pixel_values )
    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
    print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4 )
    print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f'''Saving processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f'''Pushing model and processor for {model_name} to hub''' )
        model.push_to_hub(f'''openmmlab/{model_name}''' )
        processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[f"""upernet-convnext-{size}""" for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__A =parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 163 | 1 |
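# A minimal, self-contained sketch of the rename-then-load pattern used in the
# conversion above: pop each checkpoint key and re-insert its value under the
# new name before calling model.load_state_dict. The toy key below is
# illustrative, not a real checkpoint entry.
def rename_state_dict_keys(state_dict, rename_pairs):
    for src, dest in rename_pairs:
        state_dict[dest] = state_dict.pop(src)
    return state_dict

toy_state_dict = {"backbone.stages.0.0.gamma": 1.0}
renamed = rename_state_dict_keys(
    toy_state_dict,
    [("backbone.stages.0.0.gamma", "backbone.encoder.stages.0.layers.0.layer_scale_parameter")],
)
assert "backbone.encoder.stages.0.layers.0.layer_scale_parameter" in renamed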
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
lowerCAmelCase_ = 637_8137.0
lowerCAmelCase_ = 635_6752.31_4245
lowerCAmelCase_ = 637_8137
def lowerCamelCase_ ( lowerCAmelCase: float , lowerCAmelCase: float , lowerCAmelCase: float , lowerCAmelCase: float )-> float:
_snake_case : Tuple = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
_snake_case : Optional[int] = atan((1 - flattening) * tan(radians(lowerCAmelCase ) ) )
_snake_case : Any = atan((1 - flattening) * tan(radians(lowerCAmelCase ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
_snake_case : Optional[Any] = haversine_distance(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
_snake_case : Union[str, Any] = (b_lata + b_lata) / 2
_snake_case : str = (b_lata - b_lata) / 2
# Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2(P) * cos^2(Q) / cos^2(sigma/2)
_snake_case : Any = (sin(lowerCAmelCase ) ** 2) * (cos(lowerCAmelCase ) ** 2)
_snake_case : int = cos(sigma / 2 ) ** 2
    _snake_case : int = (sigma - sin(lowerCAmelCase )) * (x_numerator / x_denominator)
# Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2(P) * sin^2(Q) / sin^2(sigma/2)
_snake_case : List[str] = (cos(lowerCAmelCase ) ** 2) * (sin(lowerCAmelCase ) ** 2)
_snake_case : str = sin(sigma / 2 ) ** 2
_snake_case : Optional[Any] = (sigma + sin(lowerCAmelCase )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 260 |
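# Readable restatement of the Lambert formula implemented above, with explicit
# variable names; the haversine helper is inlined so the sketch is
# self-contained. Constants and algebra mirror the snippet above, not an
# authoritative geodesy library.
from math import asin, atan, cos, radians, sin, sqrt, tan

EQUATORIAL_RADIUS_M = 6378137.0
POLAR_RADIUS_M = 6356752.314245

def _haversine_m(lat1, lon1, lat2, lon2):
    # great-circle distance on a sphere of equatorial radius, in metres
    phi1, phi2 = radians(lat1), radians(lat2)
    h = sin(radians(lat2 - lat1) / 2) ** 2 + cos(phi1) * cos(phi2) * sin(radians(lon2 - lon1) / 2) ** 2
    return 2 * EQUATORIAL_RADIUS_M * asin(sqrt(h))

def lamberts_distance_sketch(lat1, lon1, lat2, lon2):
    flattening = (EQUATORIAL_RADIUS_M - POLAR_RADIUS_M) / EQUATORIAL_RADIUS_M
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))  # parametric latitudes
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))
    sigma = _haversine_m(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS_M  # central angle
    if sigma == 0:
        return 0.0
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    x_value = (sigma - sin(sigma)) * (sin(p_value) ** 2 * cos(q_value) ** 2) / cos(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (cos(p_value) ** 2 * sin(q_value) ** 2) / sin(sigma / 2) ** 2
    return EQUATORIAL_RADIUS_M * (sigma - (flattening / 2) * (x_value + y_value))

assert lamberts_distance_sketch(10.0, 20.0, 10.0, 20.0) == 0.0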
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase_ ( lowerCAmelCase: List[str] , lowerCAmelCase: int , lowerCAmelCase: List[Any] )-> Dict:
# Initialise PyTorch model
_snake_case : Dict = RemBertConfig.from_json_file(lowerCAmelCase )
print('Building PyTorch model from configuration: {}'.format(str(lowerCAmelCase ) ) )
_snake_case : Optional[Any] = RemBertModel(lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Save pytorch-model
print('Save PyTorch model to {}'.format(lowerCAmelCase ) )
torch.save(model.state_dict() , lowerCAmelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--rembert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained RemBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCAmelCase_ = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 260 | 1 |
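# The script above follows the usual save recipe: build the model from a
# config, load converted weights, then torch.save the state_dict. A tiny
# round trip of that pattern on a plain nn.Linear (the /tmp path is only a
# placeholder):
import torch
import torch.nn as nn

toy_model = nn.Linear(4, 2)
torch.save(toy_model.state_dict(), "/tmp/toy_weights.bin")  # weights only, no code
restored = nn.Linear(4, 2)
restored.load_state_dict(torch.load("/tmp/toy_weights.bin"))
assert torch.equal(toy_model.weight, restored.weight)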
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('''dataset_size''' , [None, 4_0_0 * 2**2_0, 6_0_0 * 2**2_0] )
@pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 1_0_0 * 2**2_0, 9_0_0 * 2**2_0] )
def __UpperCAmelCase ( A : List[str] , A : Optional[Any] , A : str ) -> List[Any]:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , '''IN_MEMORY_MAX_SIZE''' , A )
UpperCAmelCase_ : str = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
UpperCAmelCase_ : Optional[int] = dataset_size < in_memory_max_size
else:
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : Dict = is_small_dataset(A )
assert result == expected
| 304 |
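# The predicate the test above exercises, restated as a standalone sketch:
# a dataset counts as "small" only when both sizes are truthy and the dataset
# fits under the in-memory cap (a cap of 0 disables in-memory loading).
def is_small_dataset_sketch(dataset_size, in_memory_max_size):
    if dataset_size and in_memory_max_size:
        return dataset_size < in_memory_max_size
    return False

assert is_small_dataset_sketch(400 * 2**20, 900 * 2**20) is True
assert is_small_dataset_sketch(None, 900 * 2**20) is False
assert is_small_dataset_sketch(600 * 2**20, 0) is False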
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
_SCREAMING_SNAKE_CASE : int = {
"gpt-neox-20b": 2048,
}
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : str = VOCAB_FILES_NAMES
lowerCAmelCase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : str = ["input_ids", "attention_mask"]
def __init__( self , a__=None , a__=None , a__=None , a__="<|endoftext|>" , a__="<|endoftext|>" , a__="<|endoftext|>" , a__=False , **a__ , ) -> Tuple:
'''simple docstring'''
super().__init__(
a__ , a__ , tokenizer_file=a__ , unk_token=a__ , bos_token=a__ , eos_token=a__ , add_prefix_space=a__ , **a__ , )
snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , a__ ) != add_prefix_space:
snake_case_ = getattr(a__ , pre_tok_state.pop("type" ) )
snake_case_ = add_prefix_space
snake_case_ = pre_tok_class(**a__ )
snake_case_ = add_prefix_space
def lowerCAmelCase__ ( self , a__ , a__ = None ) -> Tuple[str]:
'''simple docstring'''
snake_case_ = self._tokenizer.model.save(a__ , name=a__ )
return tuple(a__ )
def lowerCAmelCase__ ( self , a__ ) -> List[int]:
'''simple docstring'''
snake_case_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a__ , add_special_tokens=a__ ) + [self.eos_token_id] )
if len(a__ ) > self.model_max_length:
snake_case_ = input_ids[-self.model_max_length :]
return input_ids
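# The conversation builder above keeps only the most recent tokens once the
# history exceeds model_max_length; the same left-truncation on plain lists:
def truncate_left(input_ids, model_max_length):
    if len(input_ids) > model_max_length:
        return input_ids[-model_max_length:]
    return input_ids

assert truncate_left(list(range(10)), 4) == [6, 7, 8, 9]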
| 85 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A: int = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Optional[Any] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: int = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Optional[int] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: List[str] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Any = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
A: List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 76 |
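# Sketch of the lazy-import mechanism the __init__ above builds with
# _LazyModule (a private transformers helper): resolve a symbol only on first
# lookup via a module-level __getattr__ (PEP 562). The json mapping is a
# stand-in for the real import structure, not transformers' actual one.
import importlib

_IMPORT_STRUCTURE_SKETCH = {"json": ["dumps", "loads"]}

def __getattr__(name):
    for module_name, symbols in _IMPORT_STRUCTURE_SKETCH.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)

assert __getattr__("dumps")({"a": 1}) == '{"a": 1}'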
"""simple docstring"""
def _snake_case ( UpperCamelCase : list , UpperCamelCase : list ):
_validate_point(UpperCamelCase )
_validate_point(UpperCamelCase )
if len(UpperCamelCase ) != len(UpperCamelCase ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(a - b ) for a, b in zip(UpperCamelCase , UpperCamelCase ) ) )
def _snake_case ( UpperCamelCase : list[float] ):
if point:
if isinstance(UpperCamelCase , UpperCamelCase ):
for item in point:
if not isinstance(UpperCamelCase , (int, float) ):
UpperCAmelCase : Any = (
"""Expected a list of numbers as input, found """
F"{type(UpperCamelCase ).__name__}"
)
raise TypeError(UpperCamelCase )
else:
UpperCAmelCase : int = F"Expected a list of numbers as input, found {type(UpperCamelCase ).__name__}"
raise TypeError(UpperCamelCase )
else:
raise ValueError("""Missing an input""" )
def _snake_case ( UpperCamelCase : list , UpperCamelCase : list ):
_validate_point(UpperCamelCase )
_validate_point(UpperCamelCase )
if len(UpperCamelCase ) != len(UpperCamelCase ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(x - y ) for x, y in zip(UpperCamelCase , UpperCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 76 | 1 |
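# Worked example for the Manhattan distance above:
# |1 - 3| + |2 - 0| + |0 - 4| = 2 + 2 + 4 = 8.
def manhattan_distance_sketch(point_a, point_b):
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))

assert manhattan_distance_sketch([1, 2, 0], [3, 0, 4]) == 8.0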
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class snake_case ( metaclass=lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE_ : Tuple = ['''speech''']
def __init__( self : List[Any] , *UpperCamelCase__ : str , **UpperCamelCase__ : List[Any])-> Tuple:
'''simple docstring'''
requires_backends(self , ["speech"])
class snake_case ( metaclass=lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE_ : List[Any] = ['''speech''']
def __init__( self : List[str] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Dict)-> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["speech"])
| 217 |
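# What the dummy classes above guarantee: instantiating them without the
# optional backend fails immediately. A standalone stand-in for the guard
# (the real requires_backends lives in transformers.utils):
def requires_backends_sketch(obj, backends, available=()):
    missing = [b for b in backends if b not in available]
    if missing:
        raise ImportError(f"{type(obj).__name__} requires the {missing} backend(s).")

class DummySpeechThing:
    def __init__(self):
        requires_backends_sketch(self, ["speech"])

try:
    DummySpeechThing()
except ImportError as err:
    assert "speech" in str(err)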
import numpy as np
from PIL import Image
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> np.ndarray:
__lowercase : Optional[int] = np.array(__lowerCAmelCase )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
__lowercase : Optional[int] = 0
__lowercase : Union[str, Any] = 0
__lowercase : Optional[Any] = 0
__lowercase : str = 0
# compute the shape of the output matrix
__lowercase : Optional[Any] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
__lowercase : List[str] = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
__lowercase : Optional[int] = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__lowercase : Any = 0
__lowercase : List[Any] = 0
return updated_arr
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> np.ndarray:
__lowercase : Optional[Any] = np.array(__lowerCAmelCase )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
__lowercase : int = 0
__lowercase : str = 0
__lowercase : List[str] = 0
__lowercase : Dict = 0
# compute the shape of the output matrix
__lowercase : List[Any] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
__lowercase : Union[str, Any] = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
__lowercase : str = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__lowercase : int = 0
__lowercase : Tuple = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="avgpooling", verbose=True)
# Loading the image
__lowerCAmelCase : List[Any] = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 156 | 0 |
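# Compact restatement of the pooling loops above with explicit output
# indexing; behaviour matches 2-D max pooling with a square window.
import numpy as np

def maxpool2d_sketch(arr, size, stride):
    arr = np.array(arr)
    out_dim = (arr.shape[0] - size) // stride + 1
    out = np.zeros((out_dim, out_dim))
    for mat_i, i in enumerate(range(0, arr.shape[0] - size + 1, stride)):
        for mat_j, j in enumerate(range(0, arr.shape[1] - size + 1, stride)):
            out[mat_i, mat_j] = np.max(arr[i : i + size, j : j + size])
    return out

assert maxpool2d_sketch([[1, 2], [3, 4]], size=2, stride=1).tolist() == [[4.0]]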
"""simple docstring"""
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __A ( a , a , unittest.TestCase ):
__A = AutoencoderKL
__A = """sample"""
__A = 1E-2
@property
def _snake_case ( self ):
lowerCamelCase =4
lowerCamelCase =3
lowerCamelCase =(32, 32)
lowerCamelCase =floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase_ )
return {"sample": image}
@property
def _snake_case ( self ):
return (3, 32, 32)
@property
def _snake_case ( self ):
return (3, 32, 32)
def _snake_case ( self ):
lowerCamelCase ={
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
lowerCamelCase =self.dummy_input
return init_dict, inputs_dict
def _snake_case ( self ):
pass
def _snake_case ( self ):
pass
@unittest.skipIf(torch_device == """mps""" , """Gradient checkpointing skipped on MPS""" )
def _snake_case ( self ):
# enable deterministic behavior for gradient checkpointing
lowerCamelCase , lowerCamelCase =self.prepare_init_args_and_inputs_for_common()
lowerCamelCase =self.model_class(**UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
assert not model.is_gradient_checkpointing and model.training
lowerCamelCase =model(**UpperCAmelCase_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
lowerCamelCase =torch.randn_like(UpperCAmelCase_ )
lowerCamelCase =(out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
lowerCamelCase =self.model_class(**UpperCAmelCase_ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(UpperCAmelCase_ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
lowerCamelCase =model_a(**UpperCAmelCase_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
lowerCamelCase =(out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
lowerCamelCase =dict(model.named_parameters() )
lowerCamelCase =dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def _snake_case ( self ):
lowerCamelCase , lowerCamelCase =AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" , output_loading_info=UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(UpperCAmelCase_ )
lowerCamelCase =model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def _snake_case ( self ):
lowerCamelCase =AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" )
lowerCamelCase =model.to(UpperCAmelCase_ )
model.eval()
if torch_device == "mps":
lowerCamelCase =torch.manual_seed(0 )
else:
lowerCamelCase =torch.Generator(device=UpperCAmelCase_ ).manual_seed(0 )
lowerCamelCase =torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
lowerCamelCase =image.to(UpperCAmelCase_ )
with torch.no_grad():
lowerCamelCase =model(UpperCAmelCase_ , sample_posterior=UpperCAmelCase_ , generator=UpperCAmelCase_ ).sample
lowerCamelCase =output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
lowerCamelCase =torch.tensor(
[
-4.0078E-01,
-3.8323E-04,
-1.2681E-01,
-1.1462E-01,
2.0095E-01,
1.0893E-01,
-8.8247E-02,
-3.0361E-01,
-9.8644E-03,
] )
elif torch_device == "cpu":
lowerCamelCase =torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
lowerCamelCase =torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
self.assertTrue(torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , rtol=1E-2 ) )
@slow
class __A ( unittest.TestCase ):
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ):
return f"""gaussian_noise_s={seed}_shape={"_".join([str(UpperCAmelCase_ ) for s in shape] )}.npy"""
def _snake_case ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self , UpperCAmelCase_=0 , UpperCAmelCase_=(4, 3, 512, 512) , UpperCAmelCase_=False ):
lowerCamelCase =torch.floataa if fpaa else torch.floataa
lowerCamelCase =torch.from_numpy(load_hf_numpy(self.get_file_format(UpperCAmelCase_ , UpperCAmelCase_ ) ) ).to(UpperCAmelCase_ ).to(UpperCAmelCase_ )
return image
def _snake_case ( self , UpperCAmelCase_="CompVis/stable-diffusion-v1-4" , UpperCAmelCase_=False ):
lowerCamelCase ="""fp16""" if fpaa else None
lowerCamelCase =torch.floataa if fpaa else torch.floataa
lowerCamelCase =AutoencoderKL.from_pretrained(
UpperCAmelCase_ , subfolder="""vae""" , torch_dtype=UpperCAmelCase_ , revision=UpperCAmelCase_ , )
model.to(UpperCAmelCase_ ).eval()
return model
def _snake_case ( self , UpperCAmelCase_=0 ):
if torch_device == "mps":
return torch.manual_seed(UpperCAmelCase_ )
return torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCamelCase =self.get_sd_vae_model()
lowerCamelCase =self.get_sd_image(UpperCAmelCase_ )
lowerCamelCase =self.get_generator(UpperCAmelCase_ )
with torch.no_grad():
lowerCamelCase =model(UpperCAmelCase_ , generator=UpperCAmelCase_ , sample_posterior=UpperCAmelCase_ ).sample
assert sample.shape == image.shape
lowerCamelCase =sample[-1, -2:, -2:, :2].flatten().float().cpu()
lowerCamelCase =torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCamelCase =self.get_sd_vae_model(fpaa=UpperCAmelCase_ )
lowerCamelCase =self.get_sd_image(UpperCAmelCase_ , fpaa=UpperCAmelCase_ )
lowerCamelCase =self.get_generator(UpperCAmelCase_ )
with torch.no_grad():
lowerCamelCase =model(UpperCAmelCase_ , generator=UpperCAmelCase_ , sample_posterior=UpperCAmelCase_ ).sample
assert sample.shape == image.shape
lowerCamelCase =sample[-1, -2:, :2, -2:].flatten().float().cpu()
lowerCamelCase =torch.tensor(UpperCAmelCase_ )
assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCamelCase =self.get_sd_vae_model()
lowerCamelCase =self.get_sd_image(UpperCAmelCase_ )
with torch.no_grad():
lowerCamelCase =model(UpperCAmelCase_ ).sample
assert sample.shape == image.shape
lowerCamelCase =sample[-1, -2:, -2:, :2].flatten().float().cpu()
lowerCamelCase =torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCamelCase =self.get_sd_vae_model()
lowerCamelCase =self.get_sd_image(UpperCAmelCase_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
lowerCamelCase =model.decode(UpperCAmelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
lowerCamelCase =sample[-1, -2:, :2, -2:].flatten().cpu()
lowerCamelCase =torch.tensor(UpperCAmelCase_ )
assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCamelCase =self.get_sd_vae_model(fpaa=UpperCAmelCase_ )
lowerCamelCase =self.get_sd_image(UpperCAmelCase_ , shape=(3, 4, 64, 64) , fpaa=UpperCAmelCase_ )
with torch.no_grad():
lowerCamelCase =model.decode(UpperCAmelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
lowerCamelCase =sample[-1, -2:, :2, -2:].flatten().float().cpu()
lowerCamelCase =torch.tensor(UpperCAmelCase_ )
assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
def _snake_case ( self , UpperCAmelCase_ ):
lowerCamelCase =self.get_sd_vae_model(fpaa=UpperCAmelCase_ )
lowerCamelCase =self.get_sd_image(UpperCAmelCase_ , shape=(3, 4, 64, 64) , fpaa=UpperCAmelCase_ )
with torch.no_grad():
lowerCamelCase =model.decode(UpperCAmelCase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
lowerCamelCase =model.decode(UpperCAmelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
def _snake_case ( self , UpperCAmelCase_ ):
lowerCamelCase =self.get_sd_vae_model()
lowerCamelCase =self.get_sd_image(UpperCAmelCase_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
lowerCamelCase =model.decode(UpperCAmelCase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
lowerCamelCase =model.decode(UpperCAmelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCamelCase =self.get_sd_vae_model()
lowerCamelCase =self.get_sd_image(UpperCAmelCase_ )
lowerCamelCase =self.get_generator(UpperCAmelCase_ )
with torch.no_grad():
lowerCamelCase =model.encode(UpperCAmelCase_ ).latent_dist
lowerCamelCase =dist.sample(generator=UpperCAmelCase_ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
lowerCamelCase =sample[0, -1, -3:, -3:].flatten().cpu()
lowerCamelCase =torch.tensor(UpperCAmelCase_ )
lowerCamelCase =3E-3 if torch_device != """mps""" else 1E-2
assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=UpperCAmelCase_ )
| 361 |
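# Skeleton of the gradient-checkpointing equivalence check performed above:
# run one batch through two identically initialised models and require the
# losses and parameter gradients to agree within a tolerance. nn.Linear
# stands in for the autoencoder here.
import torch
import torch.nn as nn

torch.manual_seed(0)
model_a = nn.Linear(8, 8)
model_b = nn.Linear(8, 8)
model_b.load_state_dict(model_a.state_dict())  # clone the weights

batch = torch.randn(4, 8)
labels = torch.randn(4, 8)
loss_a = (model_a(batch) - labels).mean()
loss_b = (model_b(batch) - labels).mean()
loss_a.backward()
loss_b.backward()
assert torch.allclose(loss_a, loss_b, atol=1e-5)
for (_, p_a), (_, p_b) in zip(model_a.named_parameters(), model_b.named_parameters()):
    assert torch.allclose(p_a.grad, p_b.grad, atol=5e-5)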
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 262 | 0 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
UpperCamelCase__ : int = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
UpperCamelCase__ : List[str] = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
UpperCamelCase__ : Union[str, Any] = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
\'beta\' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCamelCase ( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase__ ( self : Optional[int] ):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
def UpperCamelCase__ ( self : Tuple , lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : int = CHRF.CHAR_ORDER , lowerCAmelCase__ : int = CHRF.WORD_ORDER , lowerCAmelCase__ : int = CHRF.BETA , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = len(references[0] )
if any(len(lowerCAmelCase__ ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
__SCREAMING_SNAKE_CASE : Optional[int] = [[refs[i] for refs in references] for i in range(lowerCAmelCase__ )]
__SCREAMING_SNAKE_CASE : Optional[Any] = CHRF(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = sb_chrf.corpus_score(lowerCAmelCase__ , lowerCAmelCase__ )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
} | 112 |
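# The reference transposition the docstring above warns about: this metric
# takes one sub-list of references per prediction, while sacrebleu wants one
# list per reference "stream", so rows and columns are swapped before scoring.
references = [["ref 1 for pred 1"], ["ref 1 for pred 2"]]  # per prediction
transformed = [[refs[i] for refs in references] for i in range(len(references[0]))]
assert transformed == [["ref 1 for pred 1", "ref 1 for pred 2"]]  # per stream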
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ : Any = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Optional[Any] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
UpperCamelCase__ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 112 | 1 |
'''simple docstring'''
lowercase : Tuple = '0.21.0'
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 311 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : Optional[int] = logging.get_logger(__name__)
lowercase : Tuple = {
'google/pix2struct-textcaps-base': (
'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
),
}
class A ( __snake_case ):
__magic_name__ = '''pix2struct_text_model'''
__magic_name__ = ['''past_key_values''']
__magic_name__ = {
'''hidden_size''': '''hidden_size''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , SCREAMING_SNAKE_CASE=50244 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-6 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
"""simple docstring"""
A : str = vocab_size
A : List[str] = hidden_size
A : List[Any] = d_kv
A : Optional[Any] = d_ff
A : Dict = num_layers
A : Dict = num_heads
A : Optional[int] = relative_attention_num_buckets
A : Optional[Any] = relative_attention_max_distance
A : Dict = dropout_rate
A : Dict = layer_norm_epsilon
A : Tuple = initializer_factor
A : Union[str, Any] = use_cache
A : int = eos_token_id
A : List[str] = decoder_start_token_id
# for backwards compatibility
A : int = dense_act_fn
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , decoder_start_token_id=SCREAMING_SNAKE_CASE , tie_word_embeddings=SCREAMING_SNAKE_CASE , is_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
@classmethod
def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE )
A, A : Optional[Any] = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''' ) == "pix2struct":
A : Union[str, Any] = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
class A ( __snake_case ):
__magic_name__ = '''pix2struct_vision_model'''
def __init__( self , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=1e-6 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=1e-10 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=4096 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=128 , **SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
A : List[str] = hidden_size
A : Optional[Any] = patch_embed_hidden_size
A : Union[str, Any] = d_ff
A : Dict = dropout_rate
A : str = num_hidden_layers
A : Dict = num_attention_heads
A : Tuple = initializer_range
A : List[str] = initializer_factor
A : Union[str, Any] = attention_dropout
A : Tuple = layer_norm_eps
A : int = dense_act_fn
A : Optional[int] = seq_len
A : Tuple = relative_attention_num_buckets
A : str = relative_attention_max_distance
A : Optional[Any] = d_kv
@classmethod
def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE )
A, A : int = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''' ) == "pix2struct":
A : Optional[Any] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
class A ( __snake_case ):
__magic_name__ = '''pix2struct'''
__magic_name__ = True
def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
super().__init__(tie_word_embeddings=SCREAMING_SNAKE_CASE , is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if text_config is None:
A : Dict = {}
logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''' )
if vision_config is None:
A : str = {}
logger.info('''vision_config is None. Initializing the Pix2StructVisionConfig with default values.''' )
A : Dict = PixaStructTextConfig(**SCREAMING_SNAKE_CASE )
A : Any = PixaStructVisionConfig(**SCREAMING_SNAKE_CASE )
A : Any = self.text_config.decoder_start_token_id
A : Any = self.text_config.pad_token_id
A : Dict = self.text_config.eos_token_id
A : Union[str, Any] = initializer_factor
A : Tuple = initializer_range
A : Optional[Any] = self.initializer_range
A : int = self.initializer_range
A : Tuple = is_vqa
@classmethod
def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : Tuple = copy.deepcopy(self.__dict__ )
A : Dict = self.text_config.to_dict()
A : int = self.vision_config.to_dict()
A : Any = self.__class__.model_type
return output
| 311 | 1 |
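# Minimal sketch of the nested-config pattern the composite config above
# implements: the parent's to_dict recursively serialises its sub-configs.
# Plain classes are used here, not the real PretrainedConfig machinery.
import copy

class SubConfigSketch:
    def __init__(self, hidden_size=768):
        self.hidden_size = hidden_size

    def to_dict(self):
        return copy.deepcopy(self.__dict__)

class CompositeConfigSketch:
    model_type = "composite"

    def __init__(self, text_config=None, vision_config=None):
        self.text_config = SubConfigSketch(**(text_config or {}))
        self.vision_config = SubConfigSketch(**(vision_config or {}))

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.model_type
        return output

assert CompositeConfigSketch().to_dict()["text_config"]["hidden_size"] == 768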
a ={
"""Pillow""": """Pillow<10.0.0""",
"""accelerate""": """accelerate>=0.20.3""",
"""av""": """av==9.2.0""",
"""beautifulsoup4""": """beautifulsoup4""",
"""black""": """black~=23.1""",
"""codecarbon""": """codecarbon==1.2.0""",
"""cookiecutter""": """cookiecutter==1.7.3""",
"""dataclasses""": """dataclasses""",
"""datasets""": """datasets!=2.5.0""",
"""decord""": """decord==0.6.0""",
"""deepspeed""": """deepspeed>=0.9.3""",
"""diffusers""": """diffusers""",
"""dill""": """dill<0.3.5""",
"""evaluate""": """evaluate>=0.2.0""",
"""fairscale""": """fairscale>0.3""",
"""faiss-cpu""": """faiss-cpu""",
"""fastapi""": """fastapi""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1,<=0.7.0""",
"""ftfy""": """ftfy""",
"""fugashi""": """fugashi>=1.0""",
"""GitPython""": """GitPython<3.1.19""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
"""importlib_metadata""": """importlib_metadata""",
"""ipadic""": """ipadic>=1.0.0,<2.0""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
"""jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
"""jieba""": """jieba""",
"""kenlm""": """kenlm""",
"""keras-nlp""": """keras-nlp>=0.3.1""",
"""librosa""": """librosa""",
"""nltk""": """nltk""",
"""natten""": """natten>=0.14.6""",
"""numpy""": """numpy>=1.17""",
"""onnxconverter-common""": """onnxconverter-common""",
"""onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
"""onnxruntime""": """onnxruntime>=1.4.0""",
"""opencv-python""": """opencv-python""",
"""optuna""": """optuna""",
"""optax""": """optax>=0.0.8,<=0.1.4""",
"""packaging""": """packaging>=20.0""",
"""parameterized""": """parameterized""",
"""phonemizer""": """phonemizer""",
"""protobuf""": """protobuf""",
"""psutil""": """psutil""",
"""pyyaml""": """pyyaml>=5.1""",
"""pydantic""": """pydantic<2""",
"""pytest""": """pytest>=7.2.0""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""python""": """python>=3.8.0""",
"""ray[tune]""": """ray[tune]""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
"""rjieba""": """rjieba""",
"""rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
"""ruff""": """ruff>=0.0.241,<=0.0.259""",
"""sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
"""sacremoses""": """sacremoses""",
"""safetensors""": """safetensors>=0.3.1""",
"""sagemaker""": """sagemaker>=2.31.0""",
"""scikit-learn""": """scikit-learn""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""sigopt""": """sigopt""",
"""starlette""": """starlette""",
"""sudachipy""": """sudachipy>=0.6.6""",
"""sudachidict_core""": """sudachidict_core>=20220729""",
"""tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
"""tensorflow""": """tensorflow>=2.6,<2.14""",
"""tensorflow-text""": """tensorflow-text<2.14""",
"""tf2onnx""": """tf2onnx""",
"""timeout-decorator""": """timeout-decorator""",
"""timm""": """timm""",
"""tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
"""torch""": """torch>=1.9,!=1.12.0""",
"""torchaudio""": """torchaudio""",
"""torchvision""": """torchvision""",
"""pyctcdecode""": """pyctcdecode>=0.4.0""",
"""tqdm""": """tqdm>=4.27""",
"""unidic""": """unidic>=1.0.2""",
"""unidic_lite""": """unidic_lite>=1.0.7""",
"""urllib3""": """urllib3<2.0.0""",
"""uvicorn""": """uvicorn""",
}
| 73 |
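# One way a pin table like the one above is typically consumed (illustrative
# only): parse the requirement string to recover the package name and
# specifier. Assumes the `packaging` library, which the table itself pins.
from packaging.requirements import Requirement

deps_sketch = {"numpy": "numpy>=1.17", "tqdm": "tqdm>=4.27"}
req = Requirement(deps_sketch["numpy"])
assert req.name == "numpy" and str(req.specifier) == ">=1.17"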
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = checkpoints.load_tax_checkpoint(_A )
lowerCAmelCase_ = flatten_dict(_A )
return flax_params
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = {}
lowerCAmelCase_ = {
'''token_embedder''': '''embeddings''',
'''encoder_norm''': '''layernorm''',
'''kernel''': '''weight''',
'''.out''': '''.output''',
'''scale''': '''weight''',
'''embedders_0.pos_embedding''': '''row_embedder.weight''',
'''embedders_1.pos_embedding''': '''column_embedder.weight''',
}
lowerCAmelCase_ = {
'''query''': '''attention.query''',
'''key''': '''attention.key''',
'''value''': '''attention.value''',
'''output.dense''': '''output''',
'''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
'''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
'''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
'''mlp.''': '''mlp.DenseReluDense.''',
'''pre_mlp_layer_norm''': '''mlp.layer_norm''',
'''self_attention.o''': '''self_attention.attention.o''',
'''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
'''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
'''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
lowerCAmelCase_ = '''.'''.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
lowerCAmelCase_ = new_key.replace(_A , _A )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
lowerCAmelCase_ = new_key.replace(_A , _A )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
lowerCAmelCase_ = re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , _A )
lowerCAmelCase_ = new_key.replace('''encoder''' , '''encoder.encoder''' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
lowerCAmelCase_ = re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , _A )
lowerCAmelCase_ = flax_dict[key]
lowerCAmelCase_ = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
lowerCAmelCase_ = torch.from_numpy(converted_dict[key].T )
else:
lowerCAmelCase_ = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def __UpperCamelCase ( _A , _A , _A=False , _A=False ):
lowerCAmelCase_ = get_flax_param(_A )
if not use_large:
lowerCAmelCase_ = PixaStructVisionConfig()
lowerCAmelCase_ = PixaStructTextConfig()
else:
lowerCAmelCase_ = PixaStructVisionConfig(
hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 )
lowerCAmelCase_ = PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 )
lowerCAmelCase_ = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=_A )
lowerCAmelCase_ = PixaStructForConditionalGeneration(_A )
lowerCAmelCase_ = rename_and_convert_flax_params(_A )
model.load_state_dict(_A )
lowerCAmelCase_ = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' )
lowerCAmelCase_ = PixaStructImageProcessor()
lowerCAmelCase_ = PixaStructProcessor(image_processor=_A , tokenizer=_A )
if use_large:
lowerCAmelCase_ = 4096
lowerCAmelCase_ = True
# mkdir if needed
os.makedirs(_A , exist_ok=_A )
model.save_pretrained(_A )
processor.save_pretrained(_A )
print('''Model saved in {}'''.format(_A ) )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''')
parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''')
_A = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
    args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 278 | 0 |
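# The regex-based layer renumbering used in the conversion above, isolated:
import re

t5x_key = "encoder.layers_11.attention.query.kernel"
assert re.sub(r"layers_(\d+)", r"layer.\1", t5x_key) == "encoder.layer.11.attention.query.kernel"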
'''simple docstring'''
class __magic_name__ :
def __init__( self , snake_case , snake_case , snake_case) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int =None
_UpperCAmelCase : Optional[int] =None
_UpperCAmelCase : int =graph
self._normalize_graph(snake_case , snake_case)
_UpperCAmelCase : List[str] =len(snake_case)
_UpperCAmelCase : Union[str, Any] =None
def lowerCAmelCase ( self , snake_case , snake_case) -> Optional[Any]:
'''simple docstring'''
        if isinstance(sources , int):
            _UpperCAmelCase : List[str] =[sources]
        if isinstance(sinks , int):
            _UpperCAmelCase : Optional[Any] =[sinks]
if len(snake_case) == 0 or len(snake_case) == 0:
return
_UpperCAmelCase : int =sources[0]
_UpperCAmelCase : Optional[Any] =sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(snake_case) > 1 or len(snake_case) > 1:
_UpperCAmelCase : Any =0
for i in sources:
max_input_flow += sum(self.graph[i])
_UpperCAmelCase : List[Any] =len(self.graph) + 1
for room in self.graph:
room.insert(0 , 0)
self.graph.insert(0 , [0] * size)
for i in sources:
_UpperCAmelCase : int =max_input_flow
_UpperCAmelCase : Dict =0
_UpperCAmelCase : Optional[Any] =len(self.graph) + 1
for room in self.graph:
room.append(0)
self.graph.append([0] * size)
for i in sinks:
_UpperCAmelCase : Union[str, Any] =max_input_flow
_UpperCAmelCase : Optional[int] =size - 1
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception('You need to set maximum flow algorithm before.')
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def lowerCAmelCase ( self , snake_case) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] =algorithm(self)
class __magic_name__ :
def __init__( self , snake_case) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : str =flow_network
_UpperCAmelCase : Dict =flow_network.verticesCount
_UpperCAmelCase : Tuple =flow_network.sourceIndex
_UpperCAmelCase : Optional[Any] =flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
_UpperCAmelCase : List[str] =flow_network.graph
_UpperCAmelCase : Dict =False
def lowerCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
if not self.executed:
self._algorithm()
_UpperCAmelCase : Any =True
def lowerCAmelCase ( self) -> Dict:
'''simple docstring'''
pass
class __magic_name__ ( lowerCAmelCase ):
def __init__( self , snake_case) -> Dict:
'''simple docstring'''
super().__init__(snake_case)
# use this to save your result
_UpperCAmelCase : List[str] =-1
def lowerCAmelCase ( self) -> Any:
'''simple docstring'''
if not self.executed:
            raise Exception('You should execute the algorithm before using its result!')
return self.maximum_flow
class __magic_name__ ( lowerCAmelCase ):
def __init__( self , snake_case) -> str:
'''simple docstring'''
super().__init__(snake_case)
_UpperCAmelCase : Optional[Any] =[[0] * self.verticies_count for i in range(self.verticies_count)]
_UpperCAmelCase : Tuple =[0] * self.verticies_count
_UpperCAmelCase : List[Any] =[0] * self.verticies_count
def lowerCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCAmelCase : int =self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_UpperCAmelCase : int =[
i
for i in range(self.verticies_count)
if i != self.source_index and i != self.sink_index
]
# move through list
_UpperCAmelCase : Optional[int] =0
while i < len(snake_case):
_UpperCAmelCase : str =vertices_list[i]
_UpperCAmelCase : Optional[Any] =self.heights[vertex_index]
self.process_vertex(snake_case)
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(snake_case))
_UpperCAmelCase : Optional[int] =0
else:
i += 1
_UpperCAmelCase : Optional[int] =sum(self.preflow[self.source_index])
def lowerCAmelCase ( self , snake_case) -> Any:
'''simple docstring'''
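        # discharge the vertex: push its excess to admissible neighbours and
        # relabel once no admissible edge remains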
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(snake_case , snake_case)
self.relabel(snake_case)
def lowerCAmelCase ( self , snake_case , snake_case) -> Dict:
'''simple docstring'''
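        # push as much as possible: min(excess at source, residual capacity of the edge)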
_UpperCAmelCase : int =min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def lowerCAmelCase ( self , snake_case) -> Dict:
'''simple docstring'''
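        # lift this vertex just above its lowest neighbour that still has residual capacity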
_UpperCAmelCase : Dict =None
for to_index in range(self.verticies_count):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_UpperCAmelCase : Tuple =self.heights[to_index]
if min_height is not None:
_UpperCAmelCase : Dict =min_height + 1
if __name__ == "__main__":
lowercase =[0]
lowercase =[3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
lowercase =[[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
lowercase =FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
lowercase =flow_network.find_maximum_flow()
print(F"""maximum flow is {maximum_flow}""")
| 362 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase =logging.get_logger(__name__)
lowercase ={
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class __magic_name__ ( lowerCAmelCase ):
UpperCAmelCase ="glpn"
def __init__( self , snake_case=3 , snake_case=4 , snake_case=[2, 2, 2, 2] , snake_case=[8, 4, 2, 1] , snake_case=[3_2, 6_4, 1_6_0, 2_5_6] , snake_case=[7, 3, 3, 3] , snake_case=[4, 2, 2, 2] , snake_case=[1, 2, 5, 8] , snake_case=[4, 4, 4, 4] , snake_case="gelu" , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=0.1 , snake_case=1E-6 , snake_case=6_4 , snake_case=1_0 , snake_case=-1 , **snake_case , ) -> Tuple:
'''simple docstring'''
super().__init__(**snake_case)
_UpperCAmelCase : Any =num_channels
_UpperCAmelCase : List[str] =num_encoder_blocks
_UpperCAmelCase : Optional[Any] =depths
_UpperCAmelCase : str =sr_ratios
_UpperCAmelCase : Dict =hidden_sizes
_UpperCAmelCase : List[str] =patch_sizes
_UpperCAmelCase : Any =strides
_UpperCAmelCase : List[str] =mlp_ratios
_UpperCAmelCase : Dict =num_attention_heads
_UpperCAmelCase : List[str] =hidden_act
_UpperCAmelCase : int =hidden_dropout_prob
_UpperCAmelCase : List[Any] =attention_probs_dropout_prob
_UpperCAmelCase : Union[str, Any] =initializer_range
_UpperCAmelCase : Tuple =drop_path_rate
_UpperCAmelCase : str =layer_norm_eps
_UpperCAmelCase : Optional[int] =decoder_hidden_size
_UpperCAmelCase : List[str] =max_depth
_UpperCAmelCase : Dict =head_in_index
| 242 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
_A : Dict =logging.get_logger(__name__)
class _lowercase ( OwlViTImageProcessor ):
def __init__( self: Optional[int] , *UpperCamelCase__: Dict , **UpperCamelCase__: int ):
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" , UpperCamelCase__ , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
| 41 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
_A : List[Any] =logging.get_logger(__name__)
_A : Dict =['''model.decoder.embed_positions.weights''']
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str:
if "emb" in name:
lowerCamelCase__ : Dict = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
lowerCamelCase__ : List[str] = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
lowerCamelCase__ : List[str] = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
lowerCamelCase__ : Optional[int] = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
lowerCamelCase__ : Union[str, Any] = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
lowerCamelCase__ : Dict = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
lowerCamelCase__ : Optional[Any] = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
lowerCamelCase__ : Dict = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
lowerCamelCase__ : Optional[Any] = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
lowerCamelCase__ : Optional[Any] = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
lowerCamelCase__ : int = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Tuple[Dict, Dict]:
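    # rename every key and split the fused qkv projections; the enc-dec projection
    # weights are collected separately so they can be loaded into the composite model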
lowerCamelCase__ : int = list(state_dict.keys() )
lowerCamelCase__ : Tuple = {}
for key in keys:
lowerCamelCase__ : Any = state_dict.pop(UpperCamelCase )
lowerCamelCase__ : Union[str, Any] = rename_keys(UpperCamelCase )
if "in_proj_weight" in key:
# split fused qkv proj
lowerCamelCase__ : Union[str, Any] = val[:hidden_size, :]
lowerCamelCase__ : Any = val[hidden_size : 2 * hidden_size, :]
lowerCamelCase__ : Optional[int] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
lowerCamelCase__ : str = val
else:
lowerCamelCase__ : Union[str, Any] = val
return state_dict, enc_dec_proj_state_dict
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
lowerCamelCase__ : int = 1024
lowerCamelCase__ : int = 24
lowerCamelCase__ : List[Any] = 16
elif checkpoint == "medium":
lowerCamelCase__ : Any = 1536
lowerCamelCase__ : Union[str, Any] = 48
lowerCamelCase__ : Optional[int] = 24
elif checkpoint == "large":
lowerCamelCase__ : Optional[Any] = 2048
lowerCamelCase__ : Dict = 48
lowerCamelCase__ : List[Any] = 32
else:
raise ValueError(f'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
lowerCamelCase__ : Any = MusicgenDecoderConfig(
hidden_size=UpperCamelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=UpperCamelCase , num_attention_heads=UpperCamelCase , )
return config
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase="cpu" ) -> Optional[Any]:
lowerCamelCase__ : Optional[int] = MusicGen.get_pretrained(UpperCamelCase , device=UpperCamelCase )
lowerCamelCase__ : List[Any] = decoder_config_from_checkpoint(UpperCamelCase )
lowerCamelCase__ : Any = fairseq_model.lm.state_dict()
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = rename_state_dict(
UpperCamelCase , hidden_size=decoder_config.hidden_size )
lowerCamelCase__ : str = TaEncoderModel.from_pretrained("""t5-base""" )
lowerCamelCase__ : Tuple = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
lowerCamelCase__ : Optional[int] = MusicgenForCausalLM(UpperCamelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
lowerCamelCase__ , lowerCamelCase__ : List[str] = decoder.load_state_dict(UpperCamelCase , strict=UpperCamelCase )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(UpperCamelCase )
if len(UpperCamelCase ) > 0:
raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' )
if len(UpperCamelCase ) > 0:
raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
lowerCamelCase__ : Optional[Any] = MusicgenForConditionalGeneration(text_encoder=UpperCamelCase , audio_encoder=UpperCamelCase , decoder=UpperCamelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(UpperCamelCase )
# check we can do a forward pass
lowerCamelCase__ : Dict = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
lowerCamelCase__ : Optional[Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
lowerCamelCase__ : Union[str, Any] = model(input_ids=UpperCamelCase , decoder_input_ids=UpperCamelCase ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
lowerCamelCase__ : str = AutoTokenizer.from_pretrained("""t5-base""" )
lowerCamelCase__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
lowerCamelCase__ : Optional[int] = MusicgenProcessor(feature_extractor=UpperCamelCase , tokenizer=UpperCamelCase )
# set the appropriate bos/pad token ids
lowerCamelCase__ : Union[str, Any] = 2048
lowerCamelCase__ : List[str] = 2048
# set other default generation config params
lowerCamelCase__ : Optional[Any] = int(30 * audio_encoder.config.frame_rate )
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : List[Any] = 3.0
if pytorch_dump_folder is not None:
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(UpperCamelCase )
processor.save_pretrained(UpperCamelCase )
if repo_id:
logger.info(f'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(UpperCamelCase )
processor.push_to_hub(UpperCamelCase )
if __name__ == "__main__":
_A : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
_A : List[str] =parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 41 | 1 |
"""simple docstring"""
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
# load base model
UpperCAmelCase = StableDiffusionPipeline.from_pretrained(lowercase_ , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
UpperCAmelCase = load_file(lowercase_ )
UpperCAmelCase = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
UpperCAmelCase = key.split('.' )[0].split(LORA_PREFIX_TEXT_ENCODER + '_' )[-1].split('_' )
UpperCAmelCase = pipeline.text_encoder
else:
UpperCAmelCase = key.split('.' )[0].split(LORA_PREFIX_UNET + '_' )[-1].split('_' )
UpperCAmelCase = pipeline.unet
# find the target layer
UpperCAmelCase = layer_infos.pop(0 )
while len(lowercase_ ) > -1:
try:
UpperCAmelCase = curr_layer.__getattr__(lowercase_ )
if len(lowercase_ ) > 0:
UpperCAmelCase = layer_infos.pop(0 )
elif len(lowercase_ ) == 0:
break
except Exception:
if len(lowercase_ ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
UpperCAmelCase = layer_infos.pop(0 )
UpperCAmelCase = []
if "lora_down" in key:
pair_keys.append(key.replace('lora_down' , 'lora_up' ) )
pair_keys.append(lowercase_ )
else:
pair_keys.append(lowercase_ )
pair_keys.append(key.replace('lora_up' , 'lora_down' ) )
# update weight
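        # merged weight follows W = W0 + alpha * (lora_up @ lora_down); conv kernels
        # are squeezed to 2D for the matmul and reshaped back afterwards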
if len(state_dict[pair_keys[0]].shape ) == 4:
UpperCAmelCase = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
UpperCAmelCase = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(lowercase_ , lowercase_ ).unsqueeze(2 ).unsqueeze(3 )
else:
UpperCAmelCase = state_dict[pair_keys[0]].to(torch.floataa )
UpperCAmelCase = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(lowercase_ , lowercase_ )
# update visited list
for item in pair_keys:
visited.append(lowercase_ )
return pipeline
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
snake_case_ = parser.parse_args()
snake_case_ = args.base_model_path
snake_case_ = args.checkpoint_path
snake_case_ = args.dump_path
snake_case_ = args.lora_prefix_unet
snake_case_ = args.lora_prefix_text_encoder
snake_case_ = args.alpha
snake_case_ = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
snake_case_ = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 359 |
"""simple docstring"""
from collections import deque
class A_ :
"""simple docstring"""
def __init__( self :Any , lowercase_ :str , lowercase_ :int , lowercase_ :int ) -> None:
UpperCAmelCase = process_name # process name
UpperCAmelCase = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
UpperCAmelCase = arrival_time
UpperCAmelCase = burst_time # remaining burst time
UpperCAmelCase = 0 # total time of the process wait in ready queue
UpperCAmelCase = 0 # time from arrival time to completion time
class A_ :
"""simple docstring"""
def __init__( self :Any , lowercase_ :int , lowercase_ :list[int] , lowercase_ :deque[Process] , lowercase_ :int , ) -> None:
# total number of mlfq's queues
UpperCAmelCase = number_of_queues
# time slice of queues that round robin algorithm applied
UpperCAmelCase = time_slices
# unfinished process is in this ready_queue
UpperCAmelCase = queue
# current time
UpperCAmelCase = current_time
# finished process is in this sequence queue
UpperCAmelCase = deque()
def UpperCAmelCase__ ( self :Optional[int] ) -> list[str]:
UpperCAmelCase = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def UpperCAmelCase__ ( self :List[str] , lowercase_ :list[Process] ) -> list[int]:
UpperCAmelCase = []
for i in range(len(lowercase_ ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def UpperCAmelCase__ ( self :List[str] , lowercase_ :list[Process] ) -> list[int]:
UpperCAmelCase = []
for i in range(len(lowercase_ ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def UpperCAmelCase__ ( self :Dict , lowercase_ :list[Process] ) -> list[int]:
UpperCAmelCase = []
for i in range(len(lowercase_ ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def UpperCAmelCase__ ( self :str , lowercase_ :deque[Process] ) -> list[int]:
return [q.burst_time for q in queue]
def UpperCAmelCase__ ( self :int , lowercase_ :Process ) -> int:
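        # add the time elapsed since the process last left the CPU to its waiting time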
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :deque[Process] ) -> deque[Process]:
UpperCAmelCase = deque() # sequence deque of finished process
while len(lowercase_ ) != 0:
UpperCAmelCase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(lowercase_ )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
UpperCAmelCase = 0
# set the process's turnaround time because it is finished
UpperCAmelCase = self.current_time - cp.arrival_time
# set the completion time
UpperCAmelCase = self.current_time
# add the process to queue that has finished queue
finished.append(lowercase_ )
self.finish_queue.extend(lowercase_ ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def UpperCAmelCase__ ( self :Tuple , lowercase_ :deque[Process] , lowercase_ :int ) -> tuple[deque[Process], deque[Process]]:
UpperCAmelCase = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(lowercase_ ) ):
UpperCAmelCase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(lowercase_ )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
UpperCAmelCase = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(lowercase_ )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
UpperCAmelCase = 0
# set the finish time
UpperCAmelCase = self.current_time
# update the process' turnaround time because it is finished
UpperCAmelCase = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(lowercase_ )
self.finish_queue.extend(lowercase_ ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def UpperCAmelCase__ ( self :Optional[Any] ) -> deque[Process]:
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
UpperCAmelCase , UpperCAmelCase = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
snake_case_ = Process("""P1""", 0, 53)
snake_case_ = Process("""P2""", 0, 17)
snake_case_ = Process("""P3""", 0, 68)
snake_case_ = Process("""P4""", 0, 24)
snake_case_ = 3
snake_case_ = [17, 25]
snake_case_ = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"""queue""": deque([Pa, Pa, Pa, Pa])})
snake_case_ = Process("""P1""", 0, 53)
snake_case_ = Process("""P2""", 0, 17)
snake_case_ = Process("""P3""", 0, 68)
snake_case_ = Process("""P4""", 0, 24)
snake_case_ = 3
snake_case_ = [17, 25]
snake_case_ = deque([Pa, Pa, Pa, Pa])
snake_case_ = MLFQ(number_of_queues, time_slices, queue, 0)
snake_case_ = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f'''waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print completion times of processes(P1, P2, P3, P4)
print(
f'''completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f'''turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print sequence of finished processes
print(
f'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
| 181 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a_ : List[Any] = logging.get_logger(__name__)
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = DPTConfig()
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE = 1024
SCREAMING_SNAKE_CASE = 4096
SCREAMING_SNAKE_CASE = 24
SCREAMING_SNAKE_CASE = 16
SCREAMING_SNAKE_CASE = [5, 11, 17, 23]
SCREAMING_SNAKE_CASE = [256, 512, 1024, 1024]
SCREAMING_SNAKE_CASE = (1, 384, 384)
if "ade" in checkpoint_url:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = 150
SCREAMING_SNAKE_CASE = 'huggingface/label-files'
SCREAMING_SNAKE_CASE = 'ade20k-id2label.json'
SCREAMING_SNAKE_CASE = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase , _UpperCAmelCase , repo_type='dataset')) , 'r'))
SCREAMING_SNAKE_CASE = {int(_UpperCAmelCase): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = idalabel
SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = [1, 150, 480, 480]
return config, expected_shape
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase , _UpperCAmelCase)
def lowerCamelCase__ (_UpperCAmelCase):
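    # map original DPT/timm parameter names onto the Hugging Face module layout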
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
SCREAMING_SNAKE_CASE = name.replace('pretrained.model' , 'dpt.encoder')
if "pretrained.model" in name:
SCREAMING_SNAKE_CASE = name.replace('pretrained.model' , 'dpt.embeddings')
if "patch_embed" in name:
SCREAMING_SNAKE_CASE = name.replace('patch_embed' , 'patch_embeddings')
if "pos_embed" in name:
SCREAMING_SNAKE_CASE = name.replace('pos_embed' , 'position_embeddings')
if "attn.proj" in name:
SCREAMING_SNAKE_CASE = name.replace('attn.proj' , 'attention.output.dense')
if "proj" in name and "project" not in name:
SCREAMING_SNAKE_CASE = name.replace('proj' , 'projection')
if "blocks" in name:
SCREAMING_SNAKE_CASE = name.replace('blocks' , 'layer')
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE = name.replace('mlp.fc1' , 'intermediate.dense')
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE = name.replace('mlp.fc2' , 'output.dense')
if "norm1" in name:
SCREAMING_SNAKE_CASE = name.replace('norm1' , 'layernorm_before')
if "norm2" in name:
SCREAMING_SNAKE_CASE = name.replace('norm2' , 'layernorm_after')
if "scratch.output_conv" in name:
SCREAMING_SNAKE_CASE = name.replace('scratch.output_conv' , 'head')
if "scratch" in name:
SCREAMING_SNAKE_CASE = name.replace('scratch' , 'neck')
if "layer1_rn" in name:
SCREAMING_SNAKE_CASE = name.replace('layer1_rn' , 'convs.0')
if "layer2_rn" in name:
SCREAMING_SNAKE_CASE = name.replace('layer2_rn' , 'convs.1')
if "layer3_rn" in name:
SCREAMING_SNAKE_CASE = name.replace('layer3_rn' , 'convs.2')
if "layer4_rn" in name:
SCREAMING_SNAKE_CASE = name.replace('layer4_rn' , 'convs.3')
if "refinenet" in name:
SCREAMING_SNAKE_CASE = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
SCREAMING_SNAKE_CASE = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4)}''')
if "out_conv" in name:
SCREAMING_SNAKE_CASE = name.replace('out_conv' , 'projection')
if "resConfUnit1" in name:
SCREAMING_SNAKE_CASE = name.replace('resConfUnit1' , 'residual_layer1')
if "resConfUnit2" in name:
SCREAMING_SNAKE_CASE = name.replace('resConfUnit2' , 'residual_layer2')
if "conv1" in name:
SCREAMING_SNAKE_CASE = name.replace('conv1' , 'convolution1')
if "conv2" in name:
SCREAMING_SNAKE_CASE = name.replace('conv2' , 'convolution2')
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
SCREAMING_SNAKE_CASE = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0')
if "pretrained.act_postprocess2.0.project.0" in name:
SCREAMING_SNAKE_CASE = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0')
if "pretrained.act_postprocess3.0.project.0" in name:
SCREAMING_SNAKE_CASE = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0')
if "pretrained.act_postprocess4.0.project.0" in name:
SCREAMING_SNAKE_CASE = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0')
# resize blocks
if "pretrained.act_postprocess1.3" in name:
SCREAMING_SNAKE_CASE = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection')
if "pretrained.act_postprocess1.4" in name:
SCREAMING_SNAKE_CASE = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize')
if "pretrained.act_postprocess2.3" in name:
SCREAMING_SNAKE_CASE = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection')
if "pretrained.act_postprocess2.4" in name:
SCREAMING_SNAKE_CASE = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize')
if "pretrained.act_postprocess3.3" in name:
SCREAMING_SNAKE_CASE = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection')
if "pretrained.act_postprocess4.3" in name:
SCREAMING_SNAKE_CASE = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection')
if "pretrained.act_postprocess4.4" in name:
SCREAMING_SNAKE_CASE = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize')
if "pretrained" in name:
SCREAMING_SNAKE_CASE = name.replace('pretrained' , 'dpt')
if "bn" in name:
SCREAMING_SNAKE_CASE = name.replace('bn' , 'batch_norm')
if "head" in name:
SCREAMING_SNAKE_CASE = name.replace('head' , 'head.head')
if "encoder.norm" in name:
SCREAMING_SNAKE_CASE = name.replace('encoder.norm' , 'layernorm')
if "auxlayer" in name:
SCREAMING_SNAKE_CASE = name.replace('auxlayer' , 'auxiliary_head.head')
return name
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
for i in range(config.num_hidden_layers):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''')
SCREAMING_SNAKE_CASE = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''')
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE = in_proj_weight[: config.hidden_size, :]
SCREAMING_SNAKE_CASE = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[-config.hidden_size :]
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = 'http://images.cocodataset.org/val2017/000000039769.jpg'
SCREAMING_SNAKE_CASE = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase).raw)
return im
@torch.no_grad()
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_dpt_config(_UpperCAmelCase)
# load original state_dict from URL
SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(_UpperCAmelCase , map_location='cpu')
# remove certain keys
remove_ignore_keys_(_UpperCAmelCase)
# rename keys
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE = state_dict.pop(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = val
# read in qkv matrices
read_in_q_k_v(_UpperCAmelCase , _UpperCAmelCase)
# load HuggingFace model
SCREAMING_SNAKE_CASE = DPTForSemanticSegmentation(_UpperCAmelCase) if 'ade' in checkpoint_url else DPTForDepthEstimation(_UpperCAmelCase)
model.load_state_dict(_UpperCAmelCase)
model.eval()
# Check outputs on an image
SCREAMING_SNAKE_CASE = 480 if 'ade' in checkpoint_url else 384
SCREAMING_SNAKE_CASE = DPTImageProcessor(size=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(_UpperCAmelCase , return_tensors='pt')
# forward pass
SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase).logits if 'ade' in checkpoint_url else model(**_UpperCAmelCase).predicted_depth
# Assert logits
SCREAMING_SNAKE_CASE = torch.tensor([[6.31_99, 6.36_29, 6.41_48], [6.38_50, 6.36_15, 6.41_66], [6.35_19, 6.31_76, 6.35_75]])
if "ade" in checkpoint_url:
SCREAMING_SNAKE_CASE = torch.tensor([[4.04_80, 4.24_20, 4.43_60], [4.31_24, 4.56_93, 4.82_61], [4.57_68, 4.89_65, 5.21_63]])
assert outputs.shape == torch.Size(_UpperCAmelCase)
assert (
torch.allclose(outputs[0, 0, :3, :3] , _UpperCAmelCase , atol=1e-4)
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , _UpperCAmelCase)
)
Path(_UpperCAmelCase).mkdir(exist_ok=_UpperCAmelCase)
print(F'''Saving model to {pytorch_dump_folder_path}''')
model.save_pretrained(_UpperCAmelCase)
print(F'''Saving image processor to {pytorch_dump_folder_path}''')
image_processor.save_pretrained(_UpperCAmelCase)
if push_to_hub:
print('Pushing model to hub...')
model.push_to_hub(
repo_path_or_name=Path(_UpperCAmelCase , _UpperCAmelCase) , organization='nielsr' , commit_message='Add model' , use_temp_dir=_UpperCAmelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(_UpperCAmelCase , _UpperCAmelCase) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=_UpperCAmelCase , )
if __name__ == "__main__":
a_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
a_ : Optional[int] = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 137 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
a_ : int = logging.getLogger(__name__)
a_ : List[str] = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
a_ : int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _snake_case :
_lowercase : Optional[str] = field(
default=A__ , metadata={
'''help''': (
'''The model checkpoint for weights initialization. Leave None if you want to train a model from'''
''' scratch.'''
)
} , )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(A__ )} , )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class _snake_case :
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''The input training data file (a text file).'''} )
_lowercase : Optional[str] = field(
default=A__ , metadata={
'''help''': (
'''The input training data files (multiple files in glob format). '''
'''Very often splitting large files to smaller files can prevent tokenizer going out of memory'''
)
} , )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''An optional input train ref data file for whole word mask in Chinese.'''} , )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''An optional input eval ref data file for whole word mask in Chinese.'''} , )
_lowercase : bool = field(
default=A__ , metadata={'''help''': '''Whether distinct lines of text in the dataset are to be handled as distinct sequences.'''} , )
_lowercase : bool = field(
default=A__ , metadata={'''help''': '''Train with masked-language modeling loss instead of language modeling.'''} )
_lowercase : bool = field(default=A__ , metadata={'''help''': '''Whether ot not to use whole word mask.'''} )
_lowercase : float = field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
_lowercase : float = field(
default=1 / 6 , metadata={
'''help''': (
'''Ratio of length of a span of masked tokens to surrounding context length for permutation language'''
''' modeling.'''
)
} , )
_lowercase : int = field(
default=5 , metadata={'''help''': '''Maximum length of a span of masked tokens for permutation language modeling.'''} )
_lowercase : int = field(
default=-1 , metadata={
'''help''': (
'''Optional input sequence length after tokenization.'''
'''The training dataset will be truncated in block of this size for training.'''
'''Default to the model max input length for single sentence inputs (take into account special tokens).'''
)
} , )
_lowercase : bool = field(
default=A__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = None , ):
def _dataset(_UpperCAmelCase , _UpperCAmelCase=None):
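        # build one dataset split; ref_path points at whole-word-mask reference data for Chinese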
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
                    raise ValueError('You need to set whole word masking and mlm to True for Chinese Whole Word Mask')
return LineByLineWithRefDataset(
tokenizer=_UpperCAmelCase , file_path=_UpperCAmelCase , block_size=args.block_size , ref_path=_UpperCAmelCase , )
return LineByLineTextDataset(tokenizer=_UpperCAmelCase , file_path=_UpperCAmelCase , block_size=args.block_size)
else:
return TextDataset(
tokenizer=_UpperCAmelCase , file_path=_UpperCAmelCase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=_UpperCAmelCase , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file)
elif args.train_data_files:
return ConcatDataset([_dataset(_UpperCAmelCase) for f in glob(args.train_data_files)])
else:
return _dataset(args.train_data_file , args.train_ref_file)
def lowerCamelCase__ ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
'or remove the --do_eval argument.')
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.')
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _UpperCAmelCase)
# Set seed
set_seed(training_args.seed)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir)
else:
SCREAMING_SNAKE_CASE = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.')
if model_args.tokenizer_name:
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
            ' script, save it, and load it from here, using --tokenizer_name')
if model_args.model_name_or_path:
SCREAMING_SNAKE_CASE = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , )
else:
logger.info('Training new model from scratch')
SCREAMING_SNAKE_CASE = AutoModelWithLMHead.from_config(_UpperCAmelCase)
model.resize_token_embeddings(len(_UpperCAmelCase))
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
'--mlm flag (masked language modeling).')
if data_args.block_size <= 0:
SCREAMING_SNAKE_CASE = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
SCREAMING_SNAKE_CASE = min(data_args.block_size , tokenizer.max_len)
# Get datasets
SCREAMING_SNAKE_CASE = (
get_dataset(_UpperCAmelCase , tokenizer=_UpperCAmelCase , cache_dir=model_args.cache_dir) if training_args.do_train else None
)
SCREAMING_SNAKE_CASE = (
get_dataset(_UpperCAmelCase , tokenizer=_UpperCAmelCase , evaluate=_UpperCAmelCase , cache_dir=model_args.cache_dir)
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
SCREAMING_SNAKE_CASE = DataCollatorForPermutationLanguageModeling(
tokenizer=_UpperCAmelCase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
SCREAMING_SNAKE_CASE = DataCollatorForWholeWordMask(
tokenizer=_UpperCAmelCase , mlm_probability=data_args.mlm_probability)
else:
SCREAMING_SNAKE_CASE = DataCollatorForLanguageModeling(
tokenizer=_UpperCAmelCase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability)
# Initialize our Trainer
SCREAMING_SNAKE_CASE = Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , data_collator=_UpperCAmelCase , train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , prediction_loss_only=_UpperCAmelCase , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
else None
)
trainer.train(model_path=_UpperCAmelCase)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
SCREAMING_SNAKE_CASE = {}
if training_args.do_eval:
logger.info('*** Evaluate ***')
SCREAMING_SNAKE_CASE = trainer.evaluate()
SCREAMING_SNAKE_CASE = math.exp(eval_output['eval_loss'])
SCREAMING_SNAKE_CASE = {'perplexity': perplexity}
SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , 'eval_results_lm.txt')
if trainer.is_world_master():
with open(_UpperCAmelCase , 'w') as writer:
logger.info('***** Eval results *****')
for key in sorted(result.keys()):
logger.info(' %s = %s' , _UpperCAmelCase , str(result[key]))
writer.write('%s = %s\n' % (key, str(result[key])))
results.update(_UpperCAmelCase)
return results
def lowerCamelCase__ (_UpperCAmelCase):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 137 | 1 |
"""simple docstring"""
def _lowerCAmelCase ( UpperCAmelCase__ : list[list] ) ->list[list]:
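    # one Gaussian elimination pass: scale each row so its leading entry is 1,
    # cancel that column against the first row, then recurse on the submatrix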
A__ : Tuple = current_set.copy()
for row_index, row in enumerate(UpperCAmelCase__ ):
A__ : Union[str, Any] = row[0]
for column_index, column in enumerate(UpperCAmelCase__ ):
if magnitude == 0:
A__ : int = column
continue
A__ : Tuple = column / magnitude
# Subtract to cancel term
A__ : int = current_set[0]
A__ : Any = [first_row]
A__ : Optional[Any] = current_set[1::]
for row in current_set:
A__ : Dict = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(UpperCAmelCase__ )
continue
for column_index in range(len(UpperCAmelCase__ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(UpperCAmelCase__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
A__ : int = final_set[0]
A__ : Union[str, Any] = []
A__ : Dict = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
A__ : int = simplify(UpperCAmelCase__ )
for i in range(len(UpperCAmelCase__ ) ):
resultant[i].insert(0, current_first_column[i] )
resultant.insert(0, UpperCAmelCase__ )
A__ : int = resultant
return final_set
def _lowerCAmelCase ( UpperCAmelCase__ : list[list] ) ->list:
if len(UpperCAmelCase__ ) == 0:
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
A__ : Tuple = len(UpperCAmelCase__ ) + 1
if any(len(UpperCAmelCase__ ) != _length for item in equations ):
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
for row in equations:
if any(not isinstance(UpperCAmelCase__, (int, float) ) for column in row ):
raise ValueError("""solve_simultaneous() requires lists of integers""" )
if len(UpperCAmelCase__ ) == 1:
return [equations[0][-1] / equations[0][0]]
A__ : str = equations.copy()
if any(0 in row for row in data_set ):
A__ : List[str] = data_set.copy()
A__ : Tuple = []
for row_index, row in enumerate(UpperCAmelCase__ ):
if 0 not in row:
A__ : str = data_set.pop(UpperCAmelCase__ )
break
if not full_row:
raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
data_set.insert(0, UpperCAmelCase__ )
A__ : int = data_set.copy()
A__ : int = simplify(UpperCAmelCase__ )
A__ : int = simplified[::-1]
A__ : list = []
for row in simplified:
A__ : int = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
A__ : int = row.copy()[: len(UpperCAmelCase__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(UpperCAmelCase__ ) == 0:
solutions.append(0 )
continue
A__ : Dict = temp_row[1::]
A__ : Any = temp_row[::-1]
for column_index, column in enumerate(UpperCAmelCase__ ):
current_solution -= column * solutions[column_index]
solutions.append(UpperCAmelCase__ )
A__ : Optional[int] = []
for item in solutions:
final.append(float(round(UpperCAmelCase__, 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 296 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def _lowerCAmelCase ( UpperCAmelCase__ : List[Any] ) ->List[str]:
A__ : Union[str, Any] = DPTConfig()
if "large" in checkpoint_url:
A__ : int = 1_0_2_4
A__ : Union[str, Any] = 4_0_9_6
A__ : Optional[int] = 2_4
A__ : int = 1_6
A__ : Union[str, Any] = [5, 1_1, 1_7, 2_3]
A__ : Tuple = [2_5_6, 5_1_2, 1_0_2_4, 1_0_2_4]
A__ : Tuple = (1, 3_8_4, 3_8_4)
if "ade" in checkpoint_url:
A__ : Optional[int] = True
A__ : int = 1_5_0
A__ : Union[str, Any] = """huggingface/label-files"""
A__ : List[Any] = """ade20k-id2label.json"""
A__ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(UpperCAmelCase__, UpperCAmelCase__, repo_type="""dataset""" ) ), """r""" ) )
A__ : List[Any] = {int(UpperCAmelCase__ ): v for k, v in idalabel.items()}
A__ : Dict = idalabel
A__ : List[Any] = {v: k for k, v in idalabel.items()}
A__ : Optional[Any] = [1, 1_5_0, 4_8_0, 4_8_0]
return config, expected_shape
def _lowerCAmelCase ( UpperCAmelCase__ : int ) ->Any:
A__ : List[Any] = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(UpperCAmelCase__, UpperCAmelCase__ )
def _lowerCAmelCase ( UpperCAmelCase__ : Union[str, Any] ) ->List[str]:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
A__ : str = name.replace("""pretrained.model""", """dpt.encoder""" )
if "pretrained.model" in name:
A__ : Dict = name.replace("""pretrained.model""", """dpt.embeddings""" )
if "patch_embed" in name:
A__ : List[Any] = name.replace("""patch_embed""", """patch_embeddings""" )
if "pos_embed" in name:
A__ : int = name.replace("""pos_embed""", """position_embeddings""" )
if "attn.proj" in name:
A__ : Tuple = name.replace("""attn.proj""", """attention.output.dense""" )
if "proj" in name and "project" not in name:
A__ : List[Any] = name.replace("""proj""", """projection""" )
if "blocks" in name:
A__ : Optional[Any] = name.replace("""blocks""", """layer""" )
if "mlp.fc1" in name:
A__ : int = name.replace("""mlp.fc1""", """intermediate.dense""" )
if "mlp.fc2" in name:
A__ : List[str] = name.replace("""mlp.fc2""", """output.dense""" )
if "norm1" in name:
A__ : Any = name.replace("""norm1""", """layernorm_before""" )
if "norm2" in name:
A__ : List[str] = name.replace("""norm2""", """layernorm_after""" )
if "scratch.output_conv" in name:
A__ : Optional[int] = name.replace("""scratch.output_conv""", """head""" )
if "scratch" in name:
A__ : List[str] = name.replace("""scratch""", """neck""" )
if "layer1_rn" in name:
A__ : List[str] = name.replace("""layer1_rn""", """convs.0""" )
if "layer2_rn" in name:
A__ : Optional[int] = name.replace("""layer2_rn""", """convs.1""" )
if "layer3_rn" in name:
A__ : Any = name.replace("""layer3_rn""", """convs.2""" )
if "layer4_rn" in name:
A__ : Any = name.replace("""layer4_rn""", """convs.3""" )
if "refinenet" in name:
A__ : Union[str, Any] = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
A__ : str = name.replace(f'refinenet{layer_idx}', f'fusion_stage.layers.{abs(layer_idx-4 )}' )
if "out_conv" in name:
A__ : Optional[Any] = name.replace("""out_conv""", """projection""" )
if "resConfUnit1" in name:
A__ : List[Any] = name.replace("""resConfUnit1""", """residual_layer1""" )
if "resConfUnit2" in name:
A__ : Tuple = name.replace("""resConfUnit2""", """residual_layer2""" )
if "conv1" in name:
A__ : Tuple = name.replace("""conv1""", """convolution1""" )
if "conv2" in name:
A__ : List[Any] = name.replace("""conv2""", """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
A__ : Union[str, Any] = name.replace("""pretrained.act_postprocess1.0.project.0""", """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
A__ : Tuple = name.replace("""pretrained.act_postprocess2.0.project.0""", """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
A__ : Optional[Any] = name.replace("""pretrained.act_postprocess3.0.project.0""", """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
A__ : Optional[Any] = name.replace("""pretrained.act_postprocess4.0.project.0""", """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
A__ : Any = name.replace("""pretrained.act_postprocess1.3""", """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
A__ : List[Any] = name.replace("""pretrained.act_postprocess1.4""", """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
A__ : Dict = name.replace("""pretrained.act_postprocess2.3""", """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
A__ : Optional[Any] = name.replace("""pretrained.act_postprocess2.4""", """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
A__ : Union[str, Any] = name.replace("""pretrained.act_postprocess3.3""", """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
A__ : Optional[int] = name.replace("""pretrained.act_postprocess4.3""", """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
A__ : Dict = name.replace("""pretrained.act_postprocess4.4""", """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
A__ : Union[str, Any] = name.replace("""pretrained""", """dpt""" )
if "bn" in name:
A__ : Union[str, Any] = name.replace("""bn""", """batch_norm""" )
if "head" in name:
A__ : Dict = name.replace("""head""", """head.head""" )
if "encoder.norm" in name:
A__ : Optional[int] = name.replace("""encoder.norm""", """layernorm""" )
if "auxlayer" in name:
A__ : List[str] = name.replace("""auxlayer""", """auxiliary_head.head""" )
return name
def _lowerCAmelCase ( UpperCAmelCase__ : int, UpperCAmelCase__ : Dict ) ->str:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ : Any = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' )
A__ : Tuple = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
A__ : List[str] = in_proj_weight[: config.hidden_size, :]
A__ : int = in_proj_bias[: config.hidden_size]
A__ : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ : str = in_proj_weight[
-config.hidden_size :, :
]
A__ : Optional[Any] = in_proj_bias[-config.hidden_size :]
def _lowerCAmelCase ( ) ->List[str]:
A__ : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A__ : int = Image.open(requests.get(UpperCAmelCase__, stream=UpperCAmelCase__ ).raw )
return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url: str, pytorch_dump_folder_path: str, push_to_hub: bool, model_name: str) -> None:
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys (rename_key is the helper whose tail appears above)
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")
    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
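# Hedged usage sketch (not part of the original script): the conversion can also be
# driven programmatically. The output directory below is a hypothetical example.
#
#   convert_dpt_checkpoint(
#       checkpoint_url="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
#       pytorch_dump_folder_path="./dpt-large-converted",
#       push_to_hub=False,
#       model_name="dpt-large",
#   )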
| 296 | 1 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        # double the spatial resolution with nearest-neighbor interpolation, then refine with a conv
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        # project the timestep embedding and broadcast it over the spatial dims
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
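# Hedged usage sketch (not from the original file): initializing and applying the
# upsampling block on a dummy NHWC feature map. Shapes and the PRNG seed are
# illustrative assumptions.
if __name__ == "__main__":
    block = FlaxUpsample2D(out_channels=8)
    x = jnp.zeros((1, 16, 16, 8), dtype=jnp.float32)
    params = block.init(jax.random.PRNGKey(0), x)
    y = block.apply(params, x)
    print(y.shape)  # (1, 32, 32, 8) -- spatial dims doubled by the nearest-neighbor resize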
| 114 |
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
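    # Hedged usage sketch: repeated calls hit functools' memo table; cache_info()
    # is the standard introspection helper for @lru_cache-wrapped callables.
    print(factorial(10))  # 3628800
    print(factorial(10))  # served from the cache on the second call
    print(factorial.cache_info())  # e.g. CacheInfo(hits=1, misses=11, ...)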
| 114 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 353 |
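# Hedged usage sketch for the UperNetConfig defined above (not from the original
# file; kept as comments because the module itself uses relative imports). The
# ConvNeXt backbone choice is an illustrative assumption.
#
#   from transformers import ConvNextConfig, UperNetConfig
#   backbone = ConvNextConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
#   config = UperNetConfig(backbone_config=backbone, auxiliary_in_channels=384)
#   print(config.to_dict()["backbone_config"]["model_type"])  # "convnext"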
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    """Load the original fairseq checkpoint and normalize its state dict."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]
    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)
    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)
    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the QKV weight separated in K, V, Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
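# Hedged illustration (not from the original script) of the qkv split above: a fused
# projection of depth 3*h is cut into three equal h-sized chunks along dim 0.
#
#   import torch
#   fused = torch.randn(3 * 4, 10)  # toy qkv_proj weight for hidden size 4
#   k, v, q = torch.split(fused, fused.shape[0] // 3, dim=0)
#   assert q.shape == k.shape == v.shape == (4, 10)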
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)
    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()
    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)
    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 123 | 0 |
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
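# Hedged user-side sketch (not part of this file): because of the guard pattern used
# throughout this __init__, a missing soft dependency is replaced by a dummy object
# and only surfaces as an ImportError when the guarded symbol is actually used.
#
#   from diffusers import LMSDiscreteScheduler   # always importable
#   scheduler = LMSDiscreteScheduler()           # raises here if scipy is absent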
| 339 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 81 | 0 |
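# Hedged usage sketch for the ConditionalDetrConfig above (not from the original
# file): the attribute_map makes `hidden_size` an alias that resolves to `d_model`.
#
#   config = ConditionalDetrConfig(d_model=256, encoder_attention_heads=8)
#   assert config.hidden_size == 256
#   assert config.num_attention_heads == 8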
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
| 361 |
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score

            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds

            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
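# Hedged aside (not from the original file): the acceptance rule above is the
# Metropolis criterion -- a worsening move of size `change < 0` is accepted with
# probability exp(change / T), so acceptance decays as the temperature T drops.
# `metropolis_accept` is an illustrative helper, not part of the algorithm above.
def metropolis_accept(change: float, temperature: float) -> bool:
    if change > 0:
        return True
    return random.random() < math.e ** (change / temperature)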
if __name__ == "__main__":
    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
| 248 | 0 |
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Return a dict of current worldwide COVID-19 statistics scraped from worldometers."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
| 169 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 173 | 0 |
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
| 34 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 34 | 1 |
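# Hedged aside (not from the original file): `depth_divisible_by` and `min_depth`
# above feed the standard MobileNet channel-rounding rule. A minimal sketch of
# that rule, under the usual "make_divisible" formulation:
def make_divisible(value: float, divisor: int = 8, min_value=None) -> int:
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    # make sure that rounding down does not go down by more than 10%
    if new_value < 0.9 * value:
        new_value += divisor
    return int(new_value)

# e.g. make_divisible(32 * 1.4, divisor=8) == 48 -- channel counts stay hardware-friendly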
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    # Keep prompting until the user gives a convertible answer (or accept the default).
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that removes the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
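# Hedged usage sketch (not from the original file): how the helpers above are
# typically combined when building an interactive config prompt.
#
#   num_machines = _ask_field(
#       "How many different machines will you use? [1]: ",
#       int,
#       default=1,
#       error_message="Please enter an integer.",
#   )
#   mixed_precision = _ask_options(
#       "Do you wish to use mixed precision?",
#       ["no", "fp16", "bf16", "fp8"],
#       _convert_mixed_precision,
#   )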
| 260 |
"""simple docstring"""
import pytest
import datasets
# Import fixture modules as plugins
__A : int = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # Redirect every datasets cache location into a per-session temp directory.
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 260 | 1 |
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges; adding the weight is optional; handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)

    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited
    # c is the count of nodes you want; the graph is filled with random edges
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])
    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes
def snake_case_ (self ):
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : Optional[int] = []
_UpperCAmelCase : Dict = list(self.graph )[0]
stack.append(a_ )
visited.append(a_ )
_UpperCAmelCase : List[Any] = -2
_UpperCAmelCase : Optional[int] = []
_UpperCAmelCase : Union[str, Any] = s
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : Optional[int] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase : Dict = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase : int = len(a_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase : int = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase : Optional[int] = True
if len(a_ ) != 0:
_UpperCAmelCase : Tuple = stack[len(a_ ) - 1]
else:
_UpperCAmelCase : Optional[int] = False
indirect_parents.append(a_ )
_UpperCAmelCase : List[str] = s
_UpperCAmelCase : int = ss
# check if se have reached the starting point
if len(a_ ) == 0:
return list(a_ )
def snake_case_ (self ):
_UpperCAmelCase : str = []
_UpperCAmelCase : Any = []
_UpperCAmelCase : Dict = list(self.graph )[0]
stack.append(a_ )
visited.append(a_ )
_UpperCAmelCase : int = -2
_UpperCAmelCase : Optional[int] = []
_UpperCAmelCase : List[str] = s
_UpperCAmelCase : List[str] = False
_UpperCAmelCase : Union[str, Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase : Dict = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase : Union[str, Any] = len(a_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase : Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase : str = True
if len(a_ ) != 0:
_UpperCAmelCase : Dict = stack[len(a_ ) - 1]
else:
_UpperCAmelCase : Tuple = False
indirect_parents.append(a_ )
_UpperCAmelCase : Optional[int] = s
_UpperCAmelCase : List[str] = ss
# check if se have reached the starting point
if len(a_ ) == 0:
return False
    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges; adding the weight is optional; handles repetition
    def add_pair(self, u, v, w=1):
        # check if u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited
    # c is the count of nodes you want; the graph is filled with random edges
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])
def snake_case_ (self ):
_UpperCAmelCase : Optional[int] = []
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : Dict = list(self.graph )[0]
stack.append(a_ )
visited.append(a_ )
_UpperCAmelCase : List[str] = -2
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : Dict = s
_UpperCAmelCase : List[str] = False
_UpperCAmelCase : Optional[Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase : Optional[int] = len(a_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase : Dict = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase : Tuple = True
if len(a_ ) != 0:
_UpperCAmelCase : Optional[int] = stack[len(a_ ) - 1]
else:
_UpperCAmelCase : Optional[Any] = False
indirect_parents.append(a_ )
_UpperCAmelCase : Optional[Any] = s
_UpperCAmelCase : List[str] = ss
# check if se have reached the starting point
if len(a_ ) == 0:
return list(a_ )
def snake_case_ (self ):
_UpperCAmelCase : int = []
_UpperCAmelCase : Any = []
_UpperCAmelCase : Optional[int] = list(self.graph )[0]
stack.append(a_ )
visited.append(a_ )
_UpperCAmelCase : List[Any] = -2
_UpperCAmelCase : Optional[int] = []
_UpperCAmelCase : Union[str, Any] = s
_UpperCAmelCase : List[str] = False
_UpperCAmelCase : Optional[int] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase : Any = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase : Any = len(a_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase : Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase : List[str] = True
if len(a_ ) != 0:
_UpperCAmelCase : Any = stack[len(a_ ) - 1]
else:
_UpperCAmelCase : int = False
indirect_parents.append(a_ )
_UpperCAmelCase : Union[str, Any] = s
_UpperCAmelCase : str = ss
# check if se have reached the starting point
if len(a_ ) == 0:
return False
    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
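# Hedged usage sketch (not from the original module) exercising the directed class above:
if __name__ == "__main__":
    g = DirectedGraph()
    g.add_pair(0, 1)
    g.add_pair(1, 2)
    g.add_pair(2, 0)  # introduces a cycle 0 -> 1 -> 2 -> 0
    print(g.all_nodes())   # [0, 1, 2]
    print(g.dfs(0, 2))     # [0, 1, 2]
    print(g.in_degree(0))  # 1 -- only the edge from 2 points at 0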
| 352 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
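# Hedged usage sketch (not part of this file): loading a pretrained checkpoint into
# the pipeline above and sampling one image. "google/ddpm-cifar10-32" is the usual
# small example checkpoint; treat the exact id as illustrative.
#
#   from diffusers import DDIMPipeline
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
#   image.save("ddim_sample.png")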
| 170 | 0 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
| 76 |
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of `successes` successes in `trials` independent trials."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
| 76 | 1 |
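# Hedged worked check (not from the original file) for the binomial example above:
# with 2 successes in 4 trials and p = 0.75, C(4, 2) = 6 and
# 0.75**2 * 0.25**2 = 0.03515625, so the probability is 6 * 0.03515625 = 0.2109375,
# matching the printed value.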
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def SCREAMING_SNAKE_CASE__ ( __a ):
snake_case_ : Union[str, Any] = filter(lambda __a : p.requires_grad , model.parameters() )
snake_case_ : List[str] = sum([np.prod(p.size() ) for p in model_parameters] )
return params
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE__ ( __a , __a ):
if metric == "rouge2":
snake_case_ : Union[str, Any] = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
snake_case_ : Optional[Any] = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
snake_case_ : List[str] = '{val_avg_em:.4f}-{step_count}'
elif metric == "loss":
snake_case_ : str = '{val_avg_loss:.4f}-{step_count}'
else:
raise NotImplementedError(
f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
' function.' )
snake_case_ : int = ModelCheckpoint(
dirpath=__a , filename=__a , monitor=f"""val_{metric}""" , mode='max' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def SCREAMING_SNAKE_CASE__ ( __a , __a ):
return EarlyStopping(
monitor=f"""val_{metric}""" , mode='min' if 'loss' in metric else 'max' , patience=__a , verbose=__a , )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
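# A minimal wiring sketch for the callback above (`module`, `args.output_dir`,
# `args.val_metric` and `args.patience` are assumed names, not defined in this file):
#
#     trainer = pl.Trainer(
#         default_root_dir=args.output_dir,
#         callbacks=[
#             Seq2SeqLoggingCallback(),
#             get_checkpoint_callback(args.output_dir, args.val_metric),
#             get_early_stopping_callback(args.val_metric, args.patience),
#         ],
#     )
#     trainer.fit(module)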
| 371 |
def solution(n: int = 60_08_51_47_51_43) -> int:
    """Returns the largest prime factor of n, found by trial division."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(F'''{solution() = }''')
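# Quick sanity checks (13195 = 5 * 7 * 13 * 29, so its largest prime factor is 29):
if __name__ == "__main__":
    assert solution(13195) == 29
    assert solution(17) == 17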
| 88 | 0 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_UpperCamelCase: int = 1_6
_UpperCamelCase: Optional[Any] = 3_2
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase = 16 ) -> str:
'''simple docstring'''
lowercase : Union[str, Any] = AutoTokenizer.from_pretrained('bert-base-cased' )
lowercase : Dict = load_dataset('glue' , 'mrpc' )
def tokenize_function(_UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
lowercase : Optional[Any] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase : Any = datasets.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase : Optional[int] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase : Dict = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase : List[Any] = 16
elif accelerator.mixed_precision != "no":
lowercase : Any = 8
else:
lowercase : int = None
return tokenizer.pad(
lowerCAmelCase_ , padding='longest' , max_length=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_tensors='pt' , )
# Instantiate dataloaders.
lowercase : List[Any] = DataLoader(
tokenized_datasets['train'] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ )
lowercase : Tuple = DataLoader(
tokenized_datasets['validation'] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_UpperCamelCase: Tuple = mocked_dataloaders # noqa: F811
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
'''simple docstring'''
if os.environ.get('TESTING_MOCKED_DATALOADERS' , lowerCAmelCase_ ) == "1":
lowercase : int = 2
# New Code #
lowercase : List[str] = int(args.gradient_accumulation_steps )
lowercase : Union[str, Any] = int(args.local_sgd_steps )
# Initialize accelerator
lowercase : List[Any] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowerCAmelCase_ )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError('LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase : int = config['''lr''']
lowercase : Any = int(config['num_epochs'] )
lowercase : Optional[int] = int(config['seed'] )
lowercase : Optional[Any] = int(config['batch_size'] )
lowercase : Tuple = evaluate.load('glue' , 'mrpc' )
set_seed(lowerCAmelCase_ )
lowercase : Optional[Any] = get_dataloaders(lowerCAmelCase_ , lowerCAmelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase : str = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=lowerCAmelCase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase : Dict = model.to(accelerator.device )
# Instantiate optimizer
lowercase : Optional[Any] = AdamW(params=model.parameters() , lr=lowerCAmelCase_ )
# Instantiate scheduler
lowercase : Dict = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase_ , num_warmup_steps=1_00 , num_training_steps=(len(lowerCAmelCase_ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase : Dict = accelerator.prepare(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Now we train the model
for epoch in range(lowerCAmelCase_ ):
model.train()
with LocalSGD(
accelerator=lowerCAmelCase_ , model=lowerCAmelCase_ , local_sgd_steps=lowerCAmelCase_ , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(lowerCAmelCase_ ):
lowercase : Union[str, Any] = model(**lowerCAmelCase_ )
lowercase : Tuple = output.loss
accelerator.backward(lowerCAmelCase_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase : Optional[Any] = model(**lowerCAmelCase_ )
lowercase : Union[str, Any] = outputs.logits.argmax(dim=-1 )
lowercase : List[str] = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=lowerCAmelCase_ , references=lowerCAmelCase_ , )
lowercase : List[str] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , lowerCAmelCase_ )
def lowercase__ ( ) -> int:
'''simple docstring'''
lowercase : List[str] = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=lowerCAmelCase_ , default=lowerCAmelCase_ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
# New Code #
parser.add_argument(
'--gradient_accumulation_steps' , type=lowerCAmelCase_ , default=1 , help='The number of minibatches to be ran before gradients are accumulated.' , )
parser.add_argument(
'--local_sgd_steps' , type=lowerCAmelCase_ , default=8 , help='Number of local SGD steps or None to disable local SGD' )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
lowercase : List[str] = parser.parse_args()
lowercase : List[Any] = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(lowerCAmelCase_ , lowerCAmelCase_ )
if __name__ == "__main__":
main()
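# Usage sketch (assuming the script is saved as local_sgd.py; the flag values are
# examples, not defaults required by the script):
#
#     accelerate launch local_sgd.py --local_sgd_steps 8 --gradient_accumulation_steps 2
#
# LocalSGD lets each worker take `local_sgd_steps` optimizer steps between parameter
# synchronizations, trading a little statistical efficiency for much less communication;
# it composes with the gradient accumulation handled by `accelerator.accumulate` above.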
| 255 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 25_0004
RO_CODE = 25_0020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
SCREAMING_SNAKE_CASE__ : int = """facebook/mbart-large-en-ro"""
    src_text = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
    tgt_text = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
    expected_src_tokens = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
def lowercase_ ( self ) -> Optional[Any]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 2_5_0_0_2_0 )
    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 2_0]
        assert isinstance(src_text[0], str)
        desired_max_length = 1_0
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [2_5_0_0_2_6, 2_5_0_0_0_1])
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt", )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        self.assertIsInstance(batch, BatchEncoding)
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
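    # A hedged sketch of the pattern these integration tests exercise (the checkpoint and
    # language codes come from the class attributes above; running this downloads the
    # real tokenizer from the Hub):
    #
    #     tok = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
    #     batch = tok(["UN Chief Says There Is No Military Solution in Syria"], return_tensors="pt")
    #     # source input_ids end with [eos_token_id, EN_CODE], matching the
    #     # `suffix_tokens` assertion above.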
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=1_0, return_tensors="pt")
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR")
        self.assertEqual(
            nested_simplify(inputs), {
# A, test, EOS, en_XX
'''input_ids''': [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 2_5_0_0_0_1,
            } , )
| 262 | 0 |
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    # transpose the rows of source data into per-attribute columns
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)

        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)

        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)

        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)

    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)

    return source_data
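# A small illustrative run (a sketch; the data values are examples — weight 0 marks an
# attribute to minimize, weight 1 an attribute to maximize, and each row gains a final
# score where higher means closer to the weighted ideal):
if __name__ == "__main__":
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    # e.g. roughly [20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, ~1.33]
    print(procentual_proximity(vehicles, [0, 0, 1]))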
| 350 |
import pytest
_UpperCAmelCase : List[Any] = "__dummy_dataset1__"
_UpperCAmelCase : Union[str, Any] = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
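# A hedged sketch of a test consuming the fixture above (the test name and assertions
# are illustrative, not taken from this file):
def test_dummy_dataset_script_dir(dataset_loading_script_dir):
    import os

    # the fixture returns the directory that contains the __dummy_dataset1__.py script
    assert os.path.isdir(dataset_loading_script_dir)
    assert os.path.isfile(os.path.join(dataset_loading_script_dir, "__dummy_dataset1__.py"))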
| 158 | 0 |