| code (string, length 86-54.5k) | code_codestyle (int64, 0-371) | style_context (string, length 87-49.2k) | style_context_codestyle (int64, 0-349) | label (int64, 0-1) |
|---|---|---|---|---|
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple ) -> Any:
'''simple docstring'''
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(lowerCAmelCase__ , n - 1 , lowerCAmelCase__ ) * a) % mod
else:
lowercase = binary_exponentiation(lowerCAmelCase__ , n / 2 , lowerCAmelCase__ )
return (b * b) % mod
# a prime number
__lowerCAmelCase : List[Any] =7_0_1
__lowerCAmelCase : Optional[int] =1_0_0_0_0_0_0_0_0_0
__lowerCAmelCase : Any =1_0
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
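# --- Added illustration (not part of the original file) ---
# The same computation can be done iteratively by walking the bits of the
# exponent; Python's built-in pow(a, n, mod) implements this natively.
def binary_exponentiation_iterative(a: int, n: int, mod: int) -> int:
    result = 1
    a %= mod
    while n > 0:
        if n & 1:  # multiply into the result wherever the exponent has a set bit
            result = (result * a) % mod
        a = (a * a) % mod  # square the base at every step
        n >>= 1
    return result


assert binary_exponentiation_iterative(10, 699, 701) == pow(10, 699, 701)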
| 197 |
import tempfile

import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device

from ..test_pipelines_common import to_np


class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )
        torch.manual_seed(0)
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )
        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )
        torch.manual_seed(0)
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]
        image = inputs["image"] if "image" in inputs else None
        mask_image = inputs["mask_image"] if "mask_image" in inputs else None
        original_image = inputs["original_image"] if "original_image" in inputs else None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
| 197 | 1 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 359 |
import math
def fx(x: float, a: float) -> float:
    """f(x) = x^2 - a: the root of f is the square root of a."""
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    """f'(x) = 2x."""
    return 2 * x


def get_initial_point(a: float) -> float:
    """Pick a starting point at or above sqrt(a) by repeated squaring."""
    start = 2.0

    while start <= a:
        start = math.pow(start, 2)

    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001) -> float:
    """Approximate the square root of a with Newton's method."""
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 275 | 0 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return


@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 235 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    """Wraps a Chinese-CLIP image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
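# --- Added usage sketch (not part of the original module); the checkpoint
# name and image path are illustrative assumptions:
#
#     from PIL import Image
#     from transformers import ChineseCLIPProcessor
#
#     processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#     batch = processor(text=["一只猫"], images=Image.open("cat.png"), return_tensors="pt")
#     # -> input_ids / attention_mask from the tokenizer plus pixel_values
#     #    from the image processor, merged by __call__ above.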
| 235 | 1 |
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process under shortest-remaining-time-first."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time = burst time + waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting and turnaround times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)


if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
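
# --- Added sanity check (not part of the original script) ---
# The classic SRTF textbook workload: arrivals 0..3 with bursts 8, 4, 9, 5
# yield waiting times [9, 0, 15, 2] (P2 preempts P1; P4 runs before P1/P3).
assert calculate_waitingtime([0, 1, 2, 3], [8, 4, 9, 5], 4) == [9, 0, 15, 2]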
| 355 |
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """
    Return the least row length n for which the fill-count function
    first exceeds one million (Project Euler 115).
    """
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n


if __name__ == "__main__":
    print(f"{solution() = }")
| 252 | 0 |
"""simple docstring"""
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}

# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert between metric length units by shifting the decimal exponent."""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
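
# --- Added examples (not part of the original module) ---
# a 3 -> 6 exponent gap means dividing by 10**3:
assert length_conversion(4, "kilometer", "megametre") == 0.004
# full unit names and symbols can be mixed:
assert length_conversion(1, "meter", "km") == 0.001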
| 44 |
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """Initialize with the size of each singleton set; every set starts at rank 1."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets using union by rank; return True if they were disjoint."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the root of a set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
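
# --- Added usage sketch (not part of the original module) ---
if __name__ == "__main__":
    # four singleton sets, merged pairwise and then together
    ds = DisjointSet([1, 1, 1, 1])
    ds.merge(0, 1)
    ds.merge(2, 3)
    ds.merge(0, 2)
    assert ds.max_set == 4
    assert ds.get_parent(0) == ds.get_parent(3)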
| 178 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())

    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])

    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])

    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())

    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
| 351 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 86 | 0 |
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = """
from transformers import pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
"""

        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = """
from transformers import AutoModel
"""

        run = """
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
| 69 |
from math import factorial
def solution(n: int = 20) -> int:
    """
    Count the lattice paths through an n x n grid: the central binomial
    coefficient C(2n, n).
    """
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3,...
    k = n // 2

    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
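
# --- Added check (not part of the original solution) ---
# The Project Euler 15 statement: a 2x2 grid has exactly C(4, 2) = 6 routes.
assert solution(2) == 6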
| 69 | 1 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
    """Build a ksize x ksize Gabor kernel for the given orientation and scale."""
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
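
    # --- Added sanity check (not part of the original script) ---
    # an even ksize is bumped to the next odd value, so a requested 4x4
    # kernel comes back 5x5.
    assert gabor_filter_kernel(4, 8, 0, 10, 0, 0).shape == (5, 5)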
| 371 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Fetch the Open Library JSON record for the given olid (e.g. 'isbn/...')."""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a summary as a Python dict."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f"Sorry, there are no results for ISBN: {isbn}.")
| 262 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
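# --- Added usage sketch (not part of the original module) ---
#
#     from transformers import SwinConfig
#
#     config = SwinConfig()   # Swin-Tiny-style defaults
#     config.num_layers       # 4, i.e. len(depths)
#     config.hidden_size      # 768 == embed_dim * 2 ** (num stages - 1)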
| 170 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 170 | 1 |
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
lowerCAmelCase :List[Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase :Tuple = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
lowerCAmelCase :Dict = re.compile(r'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
lowerCAmelCase :List[str] = re.compile(r'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
lowerCAmelCase :Any = re.compile(r'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
lowerCAmelCase :List[Any] = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def lowerCamelCase ( lowerCAmelCase : List[str] ):
"""simple docstring"""
__magic_name__ : str = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , lowerCamelCase_ )
return [m.group(0 ) for m in matches]
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ : Optional[Any] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
__magic_name__ : Any = {
config.replace('Config' , '' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
__magic_name__ : Union[str, Any] = collections.defaultdict(lowerCamelCase_ )
__magic_name__ : Any = collections.defaultdict(lowerCamelCase_ )
__magic_name__ : int = collections.defaultdict(lowerCamelCase_ )
    # Let's look through all transformers objects (once) and find which models are supported by a given backend.
for attr_name in dir(lowerCamelCase_ ):
__magic_name__ : Union[str, Any] = None
if _re_tf_models.match(lowerCamelCase_ ) is not None:
__magic_name__ : Any = tf_models
__magic_name__ : int = _re_tf_models.match(lowerCamelCase_ ).groups()[0]
elif _re_flax_models.match(lowerCamelCase_ ) is not None:
__magic_name__ : List[str] = flax_models
__magic_name__ : Tuple = _re_flax_models.match(lowerCamelCase_ ).groups()[0]
elif _re_pt_models.match(lowerCamelCase_ ) is not None:
__magic_name__ : List[Any] = pt_models
__magic_name__ : int = _re_pt_models.match(lowerCamelCase_ ).groups()[0]
if lookup_dict is not None:
while len(lowerCamelCase_ ) > 0:
if attr_name in model_prefix_to_model_type:
__magic_name__ : Tuple = True
break
# Try again after removing the last word in the name
__magic_name__ : List[Any] = ''.join(camel_case_split(lowerCamelCase_ )[:-1] )
__magic_name__ : List[Any] = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
__magic_name__ : Dict = list(lowerCamelCase_ )
all_models.sort()
__magic_name__ : Tuple = {'model_type': all_models}
__magic_name__ : Dict = [pt_models[t] for t in all_models]
__magic_name__ : List[Any] = [tf_models[t] for t in all_models]
__magic_name__ : Any = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure each model gets the right processor class.
__magic_name__ : str = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
__magic_name__ : int = 'AutoProcessor'
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
__magic_name__ : Union[str, Any] = 'AutoTokenizer'
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
__magic_name__ : Optional[Any] = 'AutoFeatureExtractor'
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
__magic_name__ : Optional[int] = 'AutoTokenizer'
__magic_name__ : Any = [processors[t] for t in all_models]
return pd.DataFrame(lowerCamelCase_ )
def lowerCamelCase ( lowerCAmelCase : List[Any] ):
"""simple docstring"""
__magic_name__ : Union[str, Any] = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
__magic_name__ : int = [model_mapping, f'TF_{model_mapping}', f'FLAX_{model_mapping}']
        __magic_name__ : List[str] = [auto_class, f'TF{auto_class}', f'Flax{auto_class}']
# Loop through all three frameworks
for module, cls, mapping in zip(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
# The type of pipeline may not exist in this framework
if not hasattr(lowerCamelCase_ , lowerCamelCase_ ):
continue
# First extract all model_names
__magic_name__ : Union[str, Any] = []
for name in getattr(lowerCamelCase_ , lowerCamelCase_ ).values():
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
model_names.append(lowerCamelCase_ )
else:
model_names.extend(list(lowerCamelCase_ ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def lowerCamelCase ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
__magic_name__ : Tuple = get_frameworks_table()
__magic_name__ : Any = Dataset.from_pandas(lowerCamelCase_ )
__magic_name__ : Tuple = hf_hub_download(
'huggingface/transformers-metadata' , 'pipeline_tags.json' , repo_type='dataset' , token=lowerCamelCase_ )
__magic_name__ : List[str] = Dataset.from_json(lowerCamelCase_ )
__magic_name__ : Dict = {
tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class'])
for i in range(len(lowerCamelCase_ ) )
}
__magic_name__ : Any = update_pipeline_and_auto_class_table(lowerCamelCase_ )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
__magic_name__ : int = sorted(table.keys() )
__magic_name__ : Optional[Any] = pd.DataFrame(
{
'model_class': model_classes,
'pipeline_tag': [table[m][0] for m in model_classes],
'auto_class': [table[m][1] for m in model_classes],
} )
__magic_name__ : int = Dataset.from_pandas(lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(lowerCamelCase_ , 'frameworks.json' ) )
tags_dataset.to_json(os.path.join(lowerCamelCase_ , 'pipeline_tags.json' ) )
if commit_sha is not None:
__magic_name__ : List[Any] = (
f'Update with commit {commit_sha}\n\nSee: '
f'https://github.com/huggingface/transformers/commit/{commit_sha}'
)
else:
__magic_name__ : Tuple = 'Update'
upload_folder(
repo_id='huggingface/transformers-metadata' , folder_path=lowerCamelCase_ , repo_type='dataset' , token=lowerCamelCase_ , commit_message=lowerCamelCase_ , )
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ : List[str] = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
__magic_name__ : Optional[int] = transformers_module.pipelines.SUPPORTED_TASKS
__magic_name__ : str = []
for key in pipeline_tasks:
if key not in in_table:
__magic_name__ : List[Any] = pipeline_tasks[key]['pt']
if isinstance(lowerCamelCase_ , (list, tuple) ):
__magic_name__ : str = model[0]
__magic_name__ : Dict = model.__name__
if model not in in_table.values():
missing.append(lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
__magic_name__ : Dict = ', '.join(lowerCamelCase_ )
raise ValueError(
'The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '
f'`utils/update_metadata.py`: {msg}. Please add them!' )
if __name__ == "__main__":
lowerCAmelCase :Dict = argparse.ArgumentParser()
parser.add_argument('''--token''', type=str, help='''The token to use to push to the transformers-metadata dataset.''')
parser.add_argument('''--commit_sha''', type=str, help='''The sha of the commit going with this update.''')
parser.add_argument('''--check-only''', action='''store_true''', help='''Activate to just check all pipelines are present.''')
lowerCAmelCase :Optional[int] = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha) | 368 |
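# Standalone demonstration (added for illustration) of the camel-case splitter built on
# the regex above; it cuts at lower->upper transitions and at acronym boundaries.
import re

def demo_camel_case_split(identifier):
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]

assert demo_camel_case_split("TFBertForMaskedLM") == ["TF", "Bert", "For", "Masked", "LM"]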
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase :Optional[int] = logging.get_logger(__name__)
set_seed(7_7_0)
lowerCAmelCase :str = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
lowerCAmelCase :Any = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
lowerCAmelCase :List[Any] = os.path.dirname(os.path.abspath(__file__))
lowerCAmelCase :List[Any] = os.path.join(os.path.expanduser('''~'''), '''.cache''')
lowerCAmelCase :List[str] = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def lowerCamelCase ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any]=False ):
"""simple docstring"""
__magic_name__ : str = model_type
if use_small:
key += "_small"
return os.path.join(lowerCAmelCase , REMOTE_MODEL_PATHS[key]['file_name'] )
def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
hf_hub_download(repo_id=lowerCAmelCase , filename=lowerCAmelCase , local_dir=lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any]=False , lowerCAmelCase : str="text" ):
"""simple docstring"""
if model_type == "text":
__magic_name__ : Tuple = BarkSemanticModel
__magic_name__ : Optional[int] = BarkSemanticConfig
__magic_name__ : List[Any] = BarkSemanticGenerationConfig
elif model_type == "coarse":
__magic_name__ : List[str] = BarkCoarseModel
__magic_name__ : Dict = BarkCoarseConfig
__magic_name__ : Tuple = BarkCoarseGenerationConfig
elif model_type == "fine":
__magic_name__ : Optional[Any] = BarkFineModel
__magic_name__ : Dict = BarkFineConfig
__magic_name__ : Tuple = BarkFineGenerationConfig
else:
raise NotImplementedError()
__magic_name__ : int = f'{model_type}_small' if use_small else model_type
__magic_name__ : List[str] = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(lowerCAmelCase ):
logger.info(f'{model_type} model not found, downloading into `{CACHE_DIR}`.' )
_download(model_info['repo_id'] , model_info['file_name'] )
__magic_name__ : Optional[Any] = torch.load(lowerCAmelCase , map_location=lowerCAmelCase )
# this is a hack
__magic_name__ : Optional[Any] = checkpoint['model_args']
if "input_vocab_size" not in model_args:
__magic_name__ : Dict = model_args['vocab_size']
__magic_name__ : Optional[int] = model_args['vocab_size']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
__magic_name__ : Optional[Any] = model_args.pop('n_head' )
__magic_name__ : List[str] = model_args.pop('n_embd' )
__magic_name__ : List[Any] = model_args.pop('n_layer' )
__magic_name__ : Optional[Any] = ConfigClass(**checkpoint['model_args'] )
__magic_name__ : Any = ModelClass(config=lowerCAmelCase )
__magic_name__ : List[str] = GenerationConfigClass()
__magic_name__ : List[Any] = model_generation_config
__magic_name__ : str = checkpoint['model']
# fixup checkpoint
__magic_name__ : str = '_orig_mod.'
for k, v in list(state_dict.items() ):
if k.startswith(lowerCAmelCase ):
# replace part of the key with corresponding layer name in HF implementation
__magic_name__ : Tuple = k[len(lowerCAmelCase ) :]
for old_layer_name in new_layer_name_dict:
__magic_name__ : int = new_k.replace(lowerCAmelCase , new_layer_name_dict[old_layer_name] )
__magic_name__ : Union[str, Any] = state_dict.pop(lowerCAmelCase )
__magic_name__ : Optional[Any] = set(state_dict.keys() ) - set(model.state_dict().keys() )
__magic_name__ : Any = {k for k in extra_keys if not k.endswith('.attn.bias' )}
__magic_name__ : Any = set(model.state_dict().keys() ) - set(state_dict.keys() )
__magic_name__ : Dict = {k for k in missing_keys if not k.endswith('.attn.bias' )}
if len(lowerCAmelCase ) != 0:
raise ValueError(f'extra keys found: {extra_keys}' )
if len(lowerCAmelCase ) != 0:
raise ValueError(f'missing keys: {missing_keys}' )
model.load_state_dict(lowerCAmelCase , strict=lowerCAmelCase )
__magic_name__ : Union[str, Any] = model.num_parameters(exclude_embeddings=lowerCAmelCase )
__magic_name__ : Optional[Any] = checkpoint['best_val_loss'].item()
logger.info(f'model loaded: {round(n_params/1e6 , 1 )}M params, {round(lowerCAmelCase , 3 )} loss' )
model.eval()
model.to(lowerCAmelCase )
del checkpoint, state_dict
return model
def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : Optional[Any]=False , lowerCAmelCase : Tuple="text" ):
"""simple docstring"""
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
__magic_name__ : List[str] = 'cpu' # do conversion on cpu
__magic_name__ : int = _get_ckpt_path(lowerCAmelCase , use_small=lowerCAmelCase )
__magic_name__ : Any = _load_model(lowerCAmelCase , lowerCAmelCase , model_type=lowerCAmelCase , use_small=lowerCAmelCase )
# load bark initial model
__magic_name__ : List[str] = _bark_load_model(lowerCAmelCase , 'cpu' , model_type=lowerCAmelCase , use_small=lowerCAmelCase )
if model_type == "text":
__magic_name__ : int = bark_model['model']
if model.num_parameters(exclude_embeddings=lowerCAmelCase ) != bark_model.get_num_params():
raise ValueError('initial and new models don\'t have the same number of parameters' )
# check if same output as the bark model
__magic_name__ : Union[str, Any] = 5
__magic_name__ : Optional[int] = 10
if model_type in ["text", "coarse"]:
__magic_name__ : Optional[Any] = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
__magic_name__ : List[str] = bark_model(lowerCAmelCase )[0]
__magic_name__ : Optional[int] = model(lowerCAmelCase )
# take last logits
__magic_name__ : int = output_new_model_total.logits[:, [-1], :]
else:
__magic_name__ : Tuple = 3
__magic_name__ : List[str] = 8
__magic_name__ : List[str] = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
__magic_name__ : str = model(lowerCAmelCase , lowerCAmelCase )
__magic_name__ : Tuple = bark_model(lowerCAmelCase , lowerCAmelCase )
__magic_name__ : Tuple = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError('initial and new outputs don\'t have the same shape' )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError('initial and new outputs are not equal' )
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase )
model.save_pretrained(lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , lowerCAmelCase : str , lowerCAmelCase : str , ):
"""simple docstring"""
__magic_name__ : Union[str, Any] = os.path.join(lowerCAmelCase , lowerCAmelCase )
__magic_name__ : Dict = BarkSemanticConfig.from_pretrained(os.path.join(lowerCAmelCase , 'config.json' ) )
__magic_name__ : str = BarkCoarseConfig.from_pretrained(os.path.join(lowerCAmelCase , 'config.json' ) )
__magic_name__ : int = BarkFineConfig.from_pretrained(os.path.join(lowerCAmelCase , 'config.json' ) )
__magic_name__ : List[Any] = EncodecConfig.from_pretrained('facebook/encodec_24khz' )
__magic_name__ : Optional[int] = BarkSemanticModel.from_pretrained(lowerCAmelCase )
__magic_name__ : Dict = BarkCoarseModel.from_pretrained(lowerCAmelCase )
__magic_name__ : List[str] = BarkFineModel.from_pretrained(lowerCAmelCase )
__magic_name__ : Optional[Any] = EncodecModel.from_pretrained('facebook/encodec_24khz' )
__magic_name__ : Dict = BarkConfig.from_sub_model_configs(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
__magic_name__ : List[Any] = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
__magic_name__ : int = BarkModel(lowerCAmelCase )
__magic_name__ : List[str] = semantic
__magic_name__ : Optional[int] = coarseAcoustic
__magic_name__ : List[str] = fineAcoustic
__magic_name__ : int = codec
__magic_name__ : Union[str, Any] = bark_generation_config
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase )
bark.save_pretrained(lowerCAmelCase , repo_id=lowerCAmelCase , push_to_hub=lowerCAmelCase )
if __name__ == "__main__":
lowerCAmelCase :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
lowerCAmelCase :Union[str, Any] = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small) | 275 | 0 |
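# Standalone sketch (added for illustration) of the checkpoint fixup loop above: strip the
# torch.compile "_orig_mod." prefix, then apply the old->new layer-name substitutions.
# The toy state dict below is an assumption, not Bark data.
demo_rename = {"transformer.": "", "h.": "layers.", "wte": "input_embeds_layer"}
demo_state = {"_orig_mod.transformer.wte.weight": 0, "_orig_mod.transformer.h.0.ln_1.bias": 1}
prefix = "_orig_mod."
for k in list(demo_state):
    new_k = k[len(prefix):] if k.startswith(prefix) else k
    for old in demo_rename:
        new_k = new_k.replace(old, demo_rename[old])
    demo_state[new_k] = demo_state.pop(k)
assert sorted(demo_state) == ["input_embeds_layer.weight", "layers.0.ln_1.bias"]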
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
a__ : Any = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif''']
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=1 ) ->Dict:
SCREAMING_SNAKE_CASE : int = tokenizer
SCREAMING_SNAKE_CASE : str = dataset
SCREAMING_SNAKE_CASE : int = len(_lowerCamelCase ) if n_tasks is None else n_tasks
SCREAMING_SNAKE_CASE : Union[str, Any] = n_copies
def __iter__( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : str = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer(_lowerCamelCase , padding=_lowerCamelCase , return_tensors='''pt''' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = start_length
SCREAMING_SNAKE_CASE : List[str] = eof_strings
SCREAMING_SNAKE_CASE : Dict = tokenizer
def __call__( self , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : Any = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
SCREAMING_SNAKE_CASE : Optional[int] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(_lowerCamelCase )
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = re.split('''(%s)''' % '''|'''.join(__snake_case ) , __snake_case )
# last string should be ""
return "".join(string_list[:-2] )
def UpperCAmelCase_( a__ , a__ , a__ , a__ , a__ , a__=20 , **a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = defaultdict(__snake_case ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(__snake_case ) ):
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = batch['''ids'''].shape[-1]
SCREAMING_SNAKE_CASE : Any = accelerator.unwrap_model(__snake_case ).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=__snake_case , **__snake_case )
# each task is generated batch_size times
SCREAMING_SNAKE_CASE : List[Any] = batch['''task_id'''].repeat(__snake_case )
SCREAMING_SNAKE_CASE : str = accelerator.pad_across_processes(
__snake_case , dim=1 , pad_index=tokenizer.pad_token_id )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = accelerator.gather((generated_tokens, generated_tasks) )
SCREAMING_SNAKE_CASE : Tuple = generated_tokens.cpu().numpy()
SCREAMING_SNAKE_CASE : int = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(__snake_case , __snake_case ):
gen_token_dict[task].append(__snake_case )
SCREAMING_SNAKE_CASE : Union[str, Any] = [[] for _ in range(__snake_case )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.decode(__snake_case , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case )
code_gens[task].append(remove_last_block(__snake_case ) )
return code_gens
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = HfArgumentParser(__snake_case )
SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
SCREAMING_SNAKE_CASE : Any = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
SCREAMING_SNAKE_CASE : int = '''false'''
if args.num_workers is None:
SCREAMING_SNAKE_CASE : Optional[int] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
SCREAMING_SNAKE_CASE : Union[str, Any] = Accelerator()
set_seed(args.seed , device_specific=__snake_case )
# Load model and tokenizer
SCREAMING_SNAKE_CASE : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
SCREAMING_SNAKE_CASE : Dict = tokenizer.eos_token
SCREAMING_SNAKE_CASE : Dict = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
SCREAMING_SNAKE_CASE : str = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , __snake_case , __snake_case )] ),
}
# Load evaluation dataset and metric
SCREAMING_SNAKE_CASE : Optional[Any] = load_dataset('''openai_humaneval''' )
SCREAMING_SNAKE_CASE : List[Any] = load_metric('''code_eval''' )
SCREAMING_SNAKE_CASE : Any = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
SCREAMING_SNAKE_CASE : List[str] = args.n_samples // args.batch_size
SCREAMING_SNAKE_CASE : Union[str, Any] = TokenizedDataset(__snake_case , human_eval['''test'''] , n_copies=__snake_case , n_tasks=__snake_case )
# do not confuse args.batch_size, which is actually the num_return_sequences
SCREAMING_SNAKE_CASE : Optional[int] = DataLoader(__snake_case , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
SCREAMING_SNAKE_CASE : str = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = accelerator.prepare(__snake_case , __snake_case )
SCREAMING_SNAKE_CASE : str = complete_code(
__snake_case , __snake_case , __snake_case , __snake_case , n_tasks=__snake_case , batch_size=args.batch_size , **__snake_case , )
if accelerator.is_main_process:
SCREAMING_SNAKE_CASE : Optional[Any] = []
for task in tqdm(range(__snake_case ) ):
SCREAMING_SNAKE_CASE : Any = human_eval['''test'''][task]['''test''']
SCREAMING_SNAKE_CASE : List[Any] = F"""check({human_eval["test"][task]["entry_point"]})"""
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = code_eval_metric.compute(
references=__snake_case , predictions=__snake_case , num_workers=args.num_workers )
print(F"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , '''w''' ) as fp:
json.dump(__snake_case , __snake_case )
# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 313 |
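# Standalone check (added for illustration) of the EOF-string truncation used in
# `remove_last_block` above: split on the stop markers and drop the last block.
import re

DEMO_EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]

def demo_remove_last_block(string):
    string_list = re.split("(%s)" % "|".join(DEMO_EOF_STRINGS), string)
    return "".join(string_list[:-2])

assert demo_remove_last_block("    return x + 1\ndef next_fn():") == "    return x + 1"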
def actual_power(a , b ) -> int:
    """simple docstring"""
    if b == 0:
        return 1
    # compute the half power once so the recursion needs only O(log b) multiplications
    half = actual_power(a , int(b / 2 ) )
    if (b % 2) == 0:
        return half * half
    return a * half * half
def power(a , b ) -> float:
    """simple docstring"""
    if b < 0:
        # int(b / 2) truncates toward zero, so actual_power also terminates for negative b
        return 1 / actual_power(a , b )
    return actual_power(a , b )
if __name__ == "__main__":
print(power(-2, -3))
| 5 | 0 |
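# Quick checks (added for illustration) for the two functions above; the
# negative-exponent branch returns a float via the reciprocal.
assert actual_power(2, 10) == 1024
assert power(2, -3) == 0.125
assert power(-2, -3) == -0.125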
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__lowercase = logging.getLogger(__name__)
def lowerCAmelCase (__UpperCamelCase : torch.nn.Module , __UpperCamelCase : BnbQuantizationConfig , __UpperCamelCase : Union[str, os.PathLike] = None , __UpperCamelCase : Optional[Dict[str, Union[int, str, torch.device]]] = None , __UpperCamelCase : Optional[List[str]] = None , __UpperCamelCase : Optional[Dict[Union[int, str], Union[int, str]]] = None , __UpperCamelCase : Optional[Union[str, os.PathLike]] = None , __UpperCamelCase : bool = False , ):
"""simple docstring"""
__UpperCamelCase =bnb_quantization_config.load_in_abit
__UpperCamelCase =bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
''' make sure you have the latest version of `bitsandbytes` installed.''' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
'''make sure you have the latest version of `bitsandbytes` installed.''' )
__UpperCamelCase =[]
# custom device map
if isinstance(__UpperCamelCase , __UpperCamelCase ) and len(device_map.keys() ) > 1:
__UpperCamelCase =[key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
__UpperCamelCase =get_keys_to_not_convert(__UpperCamelCase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(__UpperCamelCase )
__UpperCamelCase =bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
__UpperCamelCase =[]
__UpperCamelCase =bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(__UpperCamelCase )
# compatibility with peft
__UpperCamelCase =load_in_abit
__UpperCamelCase =load_in_abit
__UpperCamelCase =get_parameter_device(__UpperCamelCase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
__UpperCamelCase =replace_with_bnb_layers(__UpperCamelCase , __UpperCamelCase , modules_to_not_convert=__UpperCamelCase )
# convert param to the right dtype
__UpperCamelCase =bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
__UpperCamelCase =name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
__UpperCamelCase =getattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(__UpperCamelCase ):
param.to(__UpperCamelCase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
F"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
'''We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
__UpperCamelCase =replace_with_bnb_layers(
__UpperCamelCase , __UpperCamelCase , modules_to_not_convert=__UpperCamelCase )
__UpperCamelCase =get_quantized_model_device_map(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , max_memory=__UpperCamelCase , no_split_module_classes=__UpperCamelCase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
__UpperCamelCase =True
__UpperCamelCase =any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
load_checkpoint_in_model(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=__UpperCamelCase , offload_state_dict=__UpperCamelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(__UpperCamelCase , device_map=__UpperCamelCase , offload_dir=__UpperCamelCase )
def lowerCAmelCase (__UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : int=None , __UpperCamelCase : Tuple=None ):
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
__UpperCamelCase ={'''''': torch.cuda.current_device()}
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
        logger.info('''The device_map was not initialized. ''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
__UpperCamelCase ={}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
__UpperCamelCase ={}
__UpperCamelCase =special_dtypes
__UpperCamelCase =no_split_module_classes
__UpperCamelCase =bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
__UpperCamelCase =get_balanced_memory(
__UpperCamelCase , low_zero=(device_map == '''balanced_low_0''') , max_memory=__UpperCamelCase , **__UpperCamelCase , )
__UpperCamelCase =max_memory
__UpperCamelCase =infer_auto_device_map(__UpperCamelCase , **__UpperCamelCase )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
        # check that we don't have any quantized modules on the cpu
__UpperCamelCase =bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
__UpperCamelCase ={
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
else:
logger.info(
                    '''Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
def lowerCAmelCase (__UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any]=None , __UpperCamelCase : List[str]=None ):
"""simple docstring"""
if modules_to_not_convert is None:
__UpperCamelCase =[]
__UpperCamelCase , __UpperCamelCase =_replace_with_bnb_layers(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def lowerCAmelCase (__UpperCamelCase : Any , __UpperCamelCase : str , __UpperCamelCase : Tuple=None , __UpperCamelCase : Optional[Any]=None , ):
"""simple docstring"""
__UpperCamelCase =False
for name, module in model.named_children():
if current_key_name is None:
__UpperCamelCase =[]
current_key_name.append(__UpperCamelCase )
if isinstance(__UpperCamelCase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
__UpperCamelCase ='''.'''.join(__UpperCamelCase )
__UpperCamelCase =True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
__UpperCamelCase =False
break
if proceed:
                # Load the bnb module with empty weights and replace the `nn.Linear` module
if bnb_quantization_config.load_in_abit:
__UpperCamelCase =bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=__UpperCamelCase , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
__UpperCamelCase =bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
__UpperCamelCase =module.weight.data
if module.bias is not None:
__UpperCamelCase =module.bias.data
bnb_module.requires_grad_(__UpperCamelCase )
setattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
__UpperCamelCase =True
if len(list(module.children() ) ) > 0:
__UpperCamelCase , __UpperCamelCase =_replace_with_bnb_layers(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
__UpperCamelCase =has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def lowerCAmelCase (__UpperCamelCase : str ):
"""simple docstring"""
with init_empty_weights():
        __UpperCamelCase =deepcopy(__UpperCamelCase )  # this has 0 cost since it is done inside the `init_empty_weights` context manager
__UpperCamelCase =find_tied_parameters(__UpperCamelCase )
# For compatibility with Accelerate < 0.18
if isinstance(__UpperCamelCase , __UpperCamelCase ):
__UpperCamelCase =sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
__UpperCamelCase =sum(__UpperCamelCase , [] )
__UpperCamelCase =len(__UpperCamelCase ) > 0
# Check if it is a base model
__UpperCamelCase =False
if hasattr(__UpperCamelCase , '''base_model_prefix''' ):
__UpperCamelCase =not hasattr(__UpperCamelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
__UpperCamelCase =list(model.named_children() )
__UpperCamelCase =[list_modules[-1][0]]
# add last module together with tied weights
__UpperCamelCase =set(__UpperCamelCase ) - set(__UpperCamelCase )
__UpperCamelCase =list(set(__UpperCamelCase ) ) + list(__UpperCamelCase )
# remove ".weight" from the keys
__UpperCamelCase =['''.weight''', '''.bias''']
__UpperCamelCase =[]
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
__UpperCamelCase =name.replace(__UpperCamelCase , '''''' )
filtered_module_names.append(__UpperCamelCase )
return filtered_module_names
def lowerCAmelCase (__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
for m in model.modules():
if isinstance(__UpperCamelCase , bnb.nn.Linearabit ):
return True
return False
def lowerCAmelCase (__UpperCamelCase : nn.Module ):
"""simple docstring"""
return next(parameter.parameters() ).device
def lowerCAmelCase (__UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[int] ):
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(__UpperCamelCase , __UpperCamelCase , 0 , dtype=__UpperCamelCase , value=__UpperCamelCase )
__UpperCamelCase =param_name
__UpperCamelCase =model
if "." in tensor_name:
__UpperCamelCase =tensor_name.split('''.''' )
for split in splits[:-1]:
__UpperCamelCase =getattr(__UpperCamelCase , __UpperCamelCase )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
__UpperCamelCase =new_module
__UpperCamelCase =splits[-1]
# offload weights
__UpperCamelCase =False
offload_weight(module._parameters[tensor_name] , __UpperCamelCase , __UpperCamelCase , index=__UpperCamelCase )
if hasattr(module._parameters[tensor_name] , '''SCB''' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , __UpperCamelCase , index=__UpperCamelCase , )
else:
offload_weight(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , index=__UpperCamelCase )
offload_weight(__UpperCamelCase , param_name.replace('''weight''' , '''SCB''' ) , __UpperCamelCase , index=__UpperCamelCase )
set_module_tensor_to_device(__UpperCamelCase , __UpperCamelCase , '''meta''' , dtype=__UpperCamelCase , value=torch.empty(*param.size() ) )
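# Minimal sketch (added for illustration) of the recursive module-replacement pattern
# that `_replace_with_bnb_layers` applies above; nn.Identity stands in for the
# bitsandbytes linear classes so this runs without bitsandbytes installed.
import torch.nn as nn

def demo_replace_linears(model, modules_to_not_convert=()):
    for name, module in model.named_children():
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            setattr(model, name, nn.Identity())  # bnb.nn.Linear8bitLt / Linear4bit in the real flow
        elif len(list(module.children())) > 0:
            demo_replace_linears(module, modules_to_not_convert)
    return model

demo_net = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))
demo_replace_linears(demo_net)
assert isinstance(demo_net[0], nn.Identity) and isinstance(demo_net[2], nn.Identity)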
| 85 | """simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def lowerCAmelCase (__UpperCamelCase : Tuple ):
"""simple docstring"""
__UpperCamelCase =SwinConfig()
__UpperCamelCase =swin_name.split('''_''' )
__UpperCamelCase =name_split[1]
__UpperCamelCase =int(name_split[4] )
__UpperCamelCase =int(name_split[3][-1] )
if model_size == "tiny":
__UpperCamelCase =9_6
__UpperCamelCase =(2, 2, 6, 2)
__UpperCamelCase =(3, 6, 1_2, 2_4)
elif model_size == "small":
__UpperCamelCase =9_6
__UpperCamelCase =(2, 2, 1_8, 2)
__UpperCamelCase =(3, 6, 1_2, 2_4)
elif model_size == "base":
__UpperCamelCase =1_2_8
__UpperCamelCase =(2, 2, 1_8, 2)
__UpperCamelCase =(4, 8, 1_6, 3_2)
else:
__UpperCamelCase =1_9_2
__UpperCamelCase =(2, 2, 1_8, 2)
__UpperCamelCase =(6, 1_2, 2_4, 4_8)
if "in22k" in swin_name:
__UpperCamelCase =2_1_8_4_1
else:
__UpperCamelCase =1_0_0_0
__UpperCamelCase ='''huggingface/label-files'''
__UpperCamelCase ='''imagenet-1k-id2label.json'''
__UpperCamelCase =json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) )
__UpperCamelCase ={int(__UpperCamelCase ): v for k, v in idalabel.items()}
__UpperCamelCase =idalabel
__UpperCamelCase ={v: k for k, v in idalabel.items()}
__UpperCamelCase =img_size
__UpperCamelCase =num_classes
__UpperCamelCase =embed_dim
__UpperCamelCase =depths
__UpperCamelCase =num_heads
__UpperCamelCase =window_size
return config
def lowerCAmelCase (__UpperCamelCase : Optional[int] ):
"""simple docstring"""
if "patch_embed.proj" in name:
__UpperCamelCase =name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
__UpperCamelCase =name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
__UpperCamelCase ='''encoder.''' + name
if "attn.proj" in name:
__UpperCamelCase =name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
__UpperCamelCase =name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
__UpperCamelCase =name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
__UpperCamelCase =name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
__UpperCamelCase =name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
__UpperCamelCase =name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "norm.weight":
__UpperCamelCase ='''layernorm.weight'''
if name == "norm.bias":
__UpperCamelCase ='''layernorm.bias'''
if "head" in name:
__UpperCamelCase =name.replace('''head''' , '''classifier''' )
else:
__UpperCamelCase ='''swin.''' + name
return name
def lowerCAmelCase (__UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__UpperCamelCase =orig_state_dict.pop(__UpperCamelCase )
if "mask" in key:
continue
elif "qkv" in key:
__UpperCamelCase =key.split('''.''' )
__UpperCamelCase =int(key_split[1] )
__UpperCamelCase =int(key_split[3] )
__UpperCamelCase =model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__UpperCamelCase =val[:dim, :]
__UpperCamelCase =val[
dim : dim * 2, :
]
__UpperCamelCase =val[-dim:, :]
else:
__UpperCamelCase =val[
:dim
]
__UpperCamelCase =val[
dim : dim * 2
]
__UpperCamelCase =val[
-dim:
]
else:
__UpperCamelCase =val
return orig_state_dict
def lowerCAmelCase (__UpperCamelCase : int , __UpperCamelCase : Any ):
"""simple docstring"""
__UpperCamelCase =timm.create_model(__UpperCamelCase , pretrained=__UpperCamelCase )
timm_model.eval()
__UpperCamelCase =get_swin_config(__UpperCamelCase )
__UpperCamelCase =SwinForImageClassification(__UpperCamelCase )
model.eval()
__UpperCamelCase =convert_state_dict(timm_model.state_dict() , __UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
__UpperCamelCase ='''http://images.cocodataset.org/val2017/000000039769.jpg'''
__UpperCamelCase =AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swin_name.replace('''_''' , '''-''' ) ) )
__UpperCamelCase =Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
__UpperCamelCase =image_processor(images=__UpperCamelCase , return_tensors='''pt''' )
__UpperCamelCase =timm_model(inputs['''pixel_values'''] )
__UpperCamelCase =model(**__UpperCamelCase ).logits
assert torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-3 )
print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__UpperCamelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__lowercase = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 85 | 1 |
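# Standalone check (added for illustration) of the fused-qkv split performed in
# `convert_state_dict` above: a (3*dim, dim) weight slices into equal q/k/v blocks.
import torch

dim = 4
qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
assert q.shape == k.shape == v.shape == (dim, dim)
assert torch.equal(torch.cat([q, k, v]), qkv)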
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
_a = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Optional[Any]:
"""simple docstring"""
if got_ver is None or want_ver is None:
raise ValueError(
F"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
F""" reinstalling {pkg}.""" )
if not ops[op](version.parse(__lowerCAmelCase ) , version.parse(__lowerCAmelCase ) ):
raise ImportError(
F"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" )
def __A ( __lowerCAmelCase , __lowerCAmelCase = None )-> None:
"""simple docstring"""
_UpperCAmelCase = F"""\n{hint}""" if hint is not None else ''
# non-versioned check
if re.match(R'^[\w_\-\d]+$' , __lowerCAmelCase ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = requirement, None, None
else:
_UpperCAmelCase = re.findall(R'^([^!=<>\s]+)([\s!=<>]{1,2}.+)' , __lowerCAmelCase )
if not match:
raise ValueError(
'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'
F""" got {requirement}""" )
_UpperCAmelCase , _UpperCAmelCase = match[0]
_UpperCAmelCase = want_full.split(',' ) # there could be multiple requirements
_UpperCAmelCase = {}
for w in want_range:
_UpperCAmelCase = re.findall(R'^([\s!=<>]{1,2})(.+)' , __lowerCAmelCase )
if not match:
raise ValueError(
'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'
F""" but got {requirement}""" )
_UpperCAmelCase , _UpperCAmelCase = match[0]
_UpperCAmelCase = want_ver
if op not in ops:
raise ValueError(F"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" )
# special case
if pkg == "python":
        _UpperCAmelCase = '.'.join([str(x ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return
# check if any version is installed
try:
_UpperCAmelCase = importlib.metadata.version(__lowerCAmelCase )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
F"""The '{requirement}' distribution was not found and is required by this application. {hint}""" )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def __A ( __lowerCAmelCase )-> Tuple:
"""simple docstring"""
_UpperCAmelCase = 'Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'
return require_version(__lowerCAmelCase , __lowerCAmelCase )
| 39 |
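# Standalone illustration (added) of the operator-table comparison the version guard
# above is built on; packaging handles the actual version-ordering semantics.
import operator
from packaging import version

demo_ops = {"<": operator.lt, "<=": operator.le, "==": operator.eq, ">=": operator.ge, ">": operator.gt}
assert demo_ops[">="](version.parse("4.26.1"), version.parse("4.26.0"))
assert not demo_ops["=="](version.parse("2.0.0"), version.parse("2.0.1"))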
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__a = logging.get_logger(__name__)
__a = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__a = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
__a = {'''facebook/blenderbot-3B''': 1_28}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Dict = VOCAB_FILES_NAMES
A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Optional[int] = ['input_ids', 'attention_mask']
A : str = BlenderbotTokenizer
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="replace" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , errors=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowercase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space:
lowercase : List[Any] = getattr(SCREAMING_SNAKE_CASE__ , pre_tok_state.pop('''type''' ) )
lowercase : str = add_prefix_space
lowercase : List[Any] = pre_tok_class(**SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = add_prefix_space
lowercase : str = '''post_processor'''
lowercase : str = getattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if tokenizer_component_instance:
lowercase : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the `post_processor_class` object
if "sep" in state:
lowercase : Tuple = tuple(state['''sep'''] )
if "cls" in state:
lowercase : Union[str, Any] = tuple(state['''cls'''] )
lowercase : Optional[int] = False
if state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space:
lowercase : Any = add_prefix_space
lowercase : Tuple = True
if state.get('''trim_offsets''' , SCREAMING_SNAKE_CASE__ ) != trim_offsets:
lowercase : List[str] = trim_offsets
lowercase : Optional[int] = True
if changes_to_apply:
lowercase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE__ , state.pop('''type''' ) )
lowercase : Union[str, Any] = component_class(**SCREAMING_SNAKE_CASE__ )
setattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __lowerCamelCase ( self ):
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else value
lowercase : Any = value
def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
lowercase : Any = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
lowercase : int = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
lowercase : Tuple = [self.sep_token_id]
lowercase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
return token_ids_a + [self.eos_token_id]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to prefix the text with a space, as is done inside Blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses already contain the prefix space.
inputs.append(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = ''' '''.join(SCREAMING_SNAKE_CASE__ )
lowercase : Any = self.encode(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > self.model_max_length:
lowercase : Tuple = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
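# Illustrative sketch (an addition, not from the original file): the trimming step
# above keeps only the most recent tokens once the encoded conversation exceeds the
# model limit. The same left-truncation, standalone, with a toy limit of 5:
example_ids = [10, 11, 12, 13, 14, 15, 16]
example_max_length = 5
if len(example_ids) > example_max_length:
    example_ids = example_ids[-example_max_length:]
assert example_ids == [12, 13, 14, 15, 16]  # the oldest tokens are dropped first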
| 337 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class a ( unittest.TestCase ):
def __UpperCAmelCase ( self , __magic_name__ ) -> List[Any]:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
_a = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(__magic_name__ )
def __UpperCAmelCase ( self ) -> Tuple:
_a = 'sshleifer/tiny-gpt2'
_a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
_a = PyTorchBenchmark(__magic_name__ )
_a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ) -> str:
_a = 'sgugger/tiny-distilbert-classification'
_a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , only_pretrain_model=__magic_name__ , )
_a = PyTorchBenchmark(__magic_name__ )
_a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
_a = 'sshleifer/tiny-gpt2'
_a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , torchscript=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
_a = PyTorchBenchmark(__magic_name__ )
_a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def __UpperCAmelCase ( self ) -> List[str]:
_a = 'sshleifer/tiny-gpt2'
_a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , fpaa=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
_a = PyTorchBenchmark(__magic_name__ )
_a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ) -> List[str]:
_a = 'sshleifer/tiny-gpt2'
_a = AutoConfig.from_pretrained(__magic_name__ )
# set architectures equal to `None`
_a = None
_a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
_a = PyTorchBenchmark(__magic_name__ , configs=[config] )
_a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
_a = 'sshleifer/tiny-gpt2'
_a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
_a = PyTorchBenchmark(__magic_name__ )
_a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def __UpperCAmelCase ( self ) -> Tuple:
_a = 'sshleifer/tiny-gpt2'
_a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__magic_name__ , multi_process=__magic_name__ , )
_a = PyTorchBenchmark(__magic_name__ )
_a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ) -> int:
_a = 'sshleifer/tiny-gpt2'
_a = AutoConfig.from_pretrained(__magic_name__ )
_a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
_a = PyTorchBenchmark(__magic_name__ , configs=[config] )
_a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ) -> List[str]:
_a = 'sshleifer/tinier_bart'
_a = AutoConfig.from_pretrained(__magic_name__ )
_a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
_a = PyTorchBenchmark(__magic_name__ , configs=[config] )
_a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ) -> Any:
_a = 'sshleifer/tiny-gpt2'
_a = AutoConfig.from_pretrained(__magic_name__ )
_a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
_a = PyTorchBenchmark(__magic_name__ , configs=[config] )
_a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ) -> Optional[Any]:
_a = 'sshleifer/tinier_bart'
_a = AutoConfig.from_pretrained(__magic_name__ )
_a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
_a = PyTorchBenchmark(__magic_name__ , configs=[config] )
_a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ) -> Dict:
_a = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
_a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , save_to_csv=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__magic_name__ , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(__magic_name__ , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(__magic_name__ , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(__magic_name__ , 'train_time.csv' ) , env_info_csv_file=os.path.join(__magic_name__ , 'env.csv' ) , multi_process=__magic_name__ , )
_a = PyTorchBenchmark(__magic_name__ )
benchmark.run()
self.assertTrue(Path(os.path.join(__magic_name__ , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__magic_name__ , 'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__magic_name__ , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__magic_name__ , 'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__magic_name__ , 'env.csv' ) ).exists() )
def __UpperCAmelCase ( self ) -> Optional[int]:
_a = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(__magic_name__ ):
self.assertTrue(hasattr(__magic_name__ , 'sequential' ) )
self.assertTrue(hasattr(__magic_name__ , 'cumulative' ) )
self.assertTrue(hasattr(__magic_name__ , 'current' ) )
self.assertTrue(hasattr(__magic_name__ , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__magic_name__ , 'log.txt' ) , log_print=__magic_name__ , trace_memory_line_by_line=__magic_name__ , multi_process=__magic_name__ , )
_a = PyTorchBenchmark(__magic_name__ )
_a = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(__magic_name__ , 'log.txt' ) ).exists() )
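# Illustrative usage sketch (an addition, not from the original test file), assuming
# the deprecated transformers benchmark API exercised above is installed and importable:
if is_torch_available() and __name__ == "__main__":
    example_args = PyTorchBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"],  # a tiny checkpoint keeps the run fast
        training=False,
        inference=True,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    example_results = PyTorchBenchmark(example_args).run()
    print(example_results.time_inference_result, example_results.memory_inference_result)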
| 104 |
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")
def _A (lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
return (position - 1) // 2
def _A (lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
return (2 * position) + 1
def _A (lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
return (2 * position) + 2
class a ( Generic[T] ):
def __init__( self ) -> None:
_a = []
_a = {}
_a = 0
def __len__( self ) -> int:
return self.elements
def __repr__( self ) -> str:
return str(self.heap )
def __UpperCAmelCase ( self ) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
_a = self.elements
self.elements += 1
self._bubble_up(__magic_name__ )
def __UpperCAmelCase ( self ) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
_a , _a = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
_a , _a = self.heap[0]
self._bubble_down(__magic_name__ )
return elem
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> None:
# Update the weight of the given key
_a = self.position_map[elem]
_a = (elem, weight)
if position > 0:
_a = get_parent_position(__magic_name__ )
_a , _a = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(__magic_name__ )
else:
self._bubble_down(__magic_name__ )
else:
self._bubble_down(__magic_name__ )
def __UpperCAmelCase ( self , __magic_name__ ) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
_a = self.position_map[elem]
if curr_pos == 0:
return None
_a = get_parent_position(__magic_name__ )
_a , _a = self.heap[curr_pos]
_a , _a = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(__magic_name__ , __magic_name__ )
return self._bubble_up(__magic_name__ )
return None
def __UpperCAmelCase ( self , __magic_name__ ) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
_a = self.position_map[elem]
_a , _a = self.heap[curr_pos]
_a = get_child_left_position(__magic_name__ )
_a = get_child_right_position(__magic_name__ )
if child_left_position < self.elements and child_right_position < self.elements:
_a , _a = self.heap[child_left_position]
_a , _a = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(__magic_name__ , __magic_name__ )
return self._bubble_down(__magic_name__ )
if child_left_position < self.elements:
_a , _a = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(__magic_name__ , __magic_name__ )
return self._bubble_down(__magic_name__ )
else:
return None
if child_right_position < self.elements:
_a , _a = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(__magic_name__ , __magic_name__ )
return self._bubble_down(__magic_name__ )
return None
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> None:
# Swap the nodes at the given positions
_a = self.heap[nodea_pos][0]
_a = self.heap[nodea_pos][0]
_a , _a = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
_a = nodea_pos
_a = nodea_pos
class a ( Generic[T] ):
def __init__( self ) -> None:
_a = {}
_a = 0
def __repr__( self ) -> str:
return str(self.connections )
def __len__( self ) -> int:
return self.nodes
def __UpperCAmelCase ( self , __magic_name__ ) -> None:
# Add a node in the graph if it is not in the graph
if node not in self.connections:
_a = {}
self.nodes += 1
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(__magic_name__ )
self.add_node(__magic_name__ )
_a = weight
_a = weight
def _A (lowerCAmelCase__ :GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
'''simple docstring'''
_a = {node: maxsize for node in graph.connections}
_a = {node: None for node in graph.connections}
_a = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(lowerCAmelCase__ , lowerCAmelCase__ )
if priority_queue.is_empty():
return dist, parent
# initialization
_a = priority_queue.extract_min()
_a = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_a = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCAmelCase__ , dist[neighbour] )
_a = node
# running prim's algorithm
while not priority_queue.is_empty():
_a = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_a = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCAmelCase__ , dist[neighbour] )
_a = node
return dist, parent
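# Illustrative, self-contained sketch (an addition): the same "extract the closest
# node, then relax its neighbours" loop as the function above, written against plain
# dicts and heapq so it runs standalone. Note the relaxation rule mirrored here
# (dist[node] + weight) is the shortest-path (Dijkstra-style) rule used above.
import heapq
example_graph = {"a": {"b": 3, "c": 5}, "b": {"a": 3, "c": 10}, "c": {"a": 5, "b": 10}}
example_dist = {node: maxsize for node in example_graph}
example_parent = {node: None for node in example_graph}
example_dist["a"] = 0
heap = [(0, "a")]
while heap:
    d, node = heapq.heappop(heap)
    if d > example_dist[node]:
        continue  # stale queue entry, already relaxed via a shorter path
    for neighbour, weight in example_graph[node].items():
        if example_dist[neighbour] > example_dist[node] + weight:
            example_dist[neighbour] = example_dist[node] + weight
            example_parent[neighbour] = node
            heapq.heappush(heap, (example_dist[neighbour], neighbour))
assert example_dist == {"a": 0, "b": 3, "c": 5}
assert example_parent == {"a": None, "b": "a", "c": "a"}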
| 104 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : str = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
A__ : str = "CIDAS/clipseg-rd64-refined"
A__ : Any = "image_segmenter"
A__ : str = CLIPSegForImageSegmentation
A__ : Optional[int] = ["image", "text"]
A__ : Optional[Any] = ["image"]
def __init__( self: List[str] ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Optional[Any] ) -> Optional[int]:
requires_backends(self ,["""vision"""] )
super().__init__(*lowerCamelCase_ ,**lowerCamelCase_ )
def A__ ( self: Optional[int] ,lowerCamelCase_: "Image" ,lowerCamelCase_: str ) -> Union[str, Any]:
return self.pre_processor(text=[label] ,images=[image] ,padding=lowerCamelCase_ ,return_tensors="""pt""" )
def A__ ( self: str ,lowerCamelCase_: List[str] ) -> Union[str, Any]:
with torch.no_grad():
UpperCAmelCase_ : List[Any] = self.model(**lowerCamelCase_ ).logits
return logits
def A__ ( self: int ,lowerCamelCase_: Any ) -> int:
UpperCAmelCase_ : Tuple = outputs.cpu().detach().numpy()
UpperCAmelCase_ : List[Any] = 0
UpperCAmelCase_ : Union[str, Any] = 1
return Image.fromarray((array * 255).astype(np.uint8 ) )
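# Illustrative usage sketch (an addition; names are hypothetical since the defs above
# are obfuscated): the tool follows PipelineTool's encode -> forward -> decode flow.
# segmenter = ImageSegmentationTool()                      # the class defined above
# inputs = segmenter.encode(image=my_pil_image, label="a cat")
# logits = segmenter.forward(inputs)
# mask = segmenter.decode(logits)                          # a binary PIL.Image mask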
| 345 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
'''simple docstring'''
def __init__( self: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any]=13 ,lowerCamelCase_: Optional[int]=32 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: Optional[Any]=3 ,lowerCamelCase_: int=16 ,lowerCamelCase_: Optional[Any]=[32, 64, 128] ,lowerCamelCase_: Optional[int]=[1, 2, 1] ,lowerCamelCase_: Union[str, Any]=[2, 2, 4] ,lowerCamelCase_: int=2 ,lowerCamelCase_: List[str]=2.0 ,lowerCamelCase_: List[Any]=True ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: Optional[int]=0.1 ,lowerCamelCase_: Optional[int]="gelu" ,lowerCamelCase_: Any=False ,lowerCamelCase_: Dict=True ,lowerCamelCase_: Union[str, Any]=0.0_2 ,lowerCamelCase_: int=1e-5 ,lowerCamelCase_: int=True ,lowerCamelCase_: Tuple=None ,lowerCamelCase_: str=True ,lowerCamelCase_: Dict=10 ,lowerCamelCase_: str=8 ,lowerCamelCase_: Union[str, Any]=["stage1", "stage2"] ,lowerCamelCase_: Optional[Any]=[1, 2] ,) -> str:
UpperCAmelCase_ : List[Any] = parent
UpperCAmelCase_ : Tuple = batch_size
UpperCAmelCase_ : Any = image_size
UpperCAmelCase_ : str = patch_size
UpperCAmelCase_ : List[str] = num_channels
UpperCAmelCase_ : Dict = embed_dim
UpperCAmelCase_ : Dict = hidden_sizes
UpperCAmelCase_ : str = depths
UpperCAmelCase_ : int = num_heads
UpperCAmelCase_ : List[Any] = window_size
UpperCAmelCase_ : Union[str, Any] = mlp_ratio
UpperCAmelCase_ : int = qkv_bias
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[int] = drop_path_rate
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : List[Any] = use_absolute_embeddings
UpperCAmelCase_ : List[Any] = patch_norm
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Optional[Any] = is_training
UpperCAmelCase_ : Optional[Any] = scope
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase_ : Optional[int] = encoder_stride
UpperCAmelCase_ : Optional[int] = out_features
UpperCAmelCase_ : Optional[int] = out_indices
def A__ ( self: Union[str, Any] ) -> List[Any]:
UpperCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : int = None
if self.use_labels:
UpperCAmelCase_ : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : Any = self.get_config()
return config, pixel_values, labels
def A__ ( self: List[Any] ) -> Tuple:
return FocalNetConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,patch_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,)
def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: str ) -> List[str]:
UpperCAmelCase_ : Optional[int] = FocalNetModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ )
UpperCAmelCase_ : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase_ : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[int] ) -> List[str]:
UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def A__ ( self: Optional[int] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ) -> List[Any]:
UpperCAmelCase_ : Any = FocalNetForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ : int = 1
UpperCAmelCase_ : List[str] = FocalNetForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ) -> int:
UpperCAmelCase_ : List[Any] = self.type_sequence_label_size
UpperCAmelCase_ : int = FocalNetForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ : List[Any] = 1
UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def A__ ( self: Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = config_and_inputs
UpperCAmelCase_ : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : List[Any] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
A__ : Union[str, Any] = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
A__ : Optional[Any] = False
A__ : Any = False
A__ : List[str] = False
A__ : Any = False
A__ : Any = False
def A__ ( self: List[str] ) -> Tuple:
UpperCAmelCase_ : Dict = FocalNetModelTester(self )
UpperCAmelCase_ : int = ConfigTester(self ,config_class=lowerCamelCase_ ,embed_dim=37 ,has_text_modality=lowerCamelCase_ )
def A__ ( self: List[str] ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self: List[str] ) -> Union[str, Any]:
return
def A__ ( self: str ) -> List[str]:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def A__ ( self: Tuple ) -> int:
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase_ )
def A__ ( self: Dict ) -> List[str]:
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def A__ ( self: int ) -> int:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def A__ ( self: int ) -> Dict:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def A__ ( self: Optional[Any] ) -> Optional[Any]:
pass
def A__ ( self: Optional[Any] ) -> List[str]:
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : Optional[Any] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase_ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) )
def A__ ( self: str ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : str = model_class(lowerCamelCase_ )
UpperCAmelCase_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Any = [*signature.parameters.keys()]
UpperCAmelCase_ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase_ )
def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ) -> List[str]:
UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : Any = outputs.hidden_states
UpperCAmelCase_ : List[Any] = getattr(
self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
# FocalNet has a different seq_length
UpperCAmelCase_ : int = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
UpperCAmelCase_ : Union[str, Any] = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = reshaped_hidden_states[0].shape
UpperCAmelCase_ : List[Any] = (
reshaped_hidden_states[0].view(lowerCamelCase_ ,lowerCamelCase_ ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def A__ ( self: Any ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : str = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Union[str, Any] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: List[str] ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Tuple = 3
UpperCAmelCase_ : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase_ : Union[str, Any] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase_ : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Optional[int] = True
self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )
@slow
def A__ ( self: Optional[int] ) -> Optional[Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Tuple = FocalNetModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
UpperCAmelCase_ : List[Any] = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self: Optional[int] ) -> str:
# TODO update organization
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def A__ ( self: List[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = self.default_image_processor
UpperCAmelCase_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
UpperCAmelCase_ : Dict = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Dict = model(**lowerCamelCase_ )
# verify the logits
UpperCAmelCase_ : str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() ,281 )
@require_torch
class _snake_case ( __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : List[Any] = (FocalNetBackbone,) if is_torch_available() else ()
A__ : int = FocalNetConfig
A__ : List[str] = False
def A__ ( self: Any ) -> Optional[int]:
UpperCAmelCase_ : str = FocalNetModelTester(self )
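# Illustrative worked example (an addition): with the tester defaults above
# (image_size=32, patch_size=2, embed_dim=16, depths=[1, 2, 1]), the shape check in
# create_and_check_model resolves to:
#   num_patches      = (32 // 2) ** 2       = 256
#   expected_seq_len = 256 // 4 ** (3 - 1)  = 16   (two downsampling transitions, each reducing the patch count 4x)
#   expected_dim     = 16 * 2 ** (3 - 1)    = 64
# so last_hidden_state is expected to be (batch_size, 16, 64).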
| 345 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """bert-generation"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int]=50_358 , SCREAMING_SNAKE_CASE : List[Any]=1_024 , SCREAMING_SNAKE_CASE : Optional[Any]=24 , SCREAMING_SNAKE_CASE : List[str]=16 , SCREAMING_SNAKE_CASE : Tuple=4_096 , SCREAMING_SNAKE_CASE : List[Any]="gelu" , SCREAMING_SNAKE_CASE : str=0.1 , SCREAMING_SNAKE_CASE : Any=0.1 , SCREAMING_SNAKE_CASE : Optional[int]=512 , SCREAMING_SNAKE_CASE : Tuple=0.02 , SCREAMING_SNAKE_CASE : Optional[int]=1E-1_2 , SCREAMING_SNAKE_CASE : List[Any]=0 , SCREAMING_SNAKE_CASE : List[str]=2 , SCREAMING_SNAKE_CASE : str=1 , SCREAMING_SNAKE_CASE : str="absolute" , SCREAMING_SNAKE_CASE : Optional[int]=True , **SCREAMING_SNAKE_CASE : str , ):
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
lowercase__ : Any = vocab_size
lowercase__ : Any = hidden_size
lowercase__ : Tuple = num_hidden_layers
lowercase__ : Union[str, Any] = num_attention_heads
lowercase__ : Any = hidden_act
lowercase__ : Dict = intermediate_size
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : Tuple = attention_probs_dropout_prob
lowercase__ : int = max_position_embeddings
lowercase__ : Optional[Any] = initializer_range
lowercase__ : Tuple = layer_norm_eps
lowercase__ : Dict = position_embedding_type
lowercase__ : Union[str, Any] = use_cache
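# Illustrative usage sketch (an addition; the name follows transformers'
# BertGenerationConfig, since the class definition above is obfuscated):
# config = BertGenerationConfig(num_hidden_layers=12)
# assert config.model_type == "bert-generation" and config.num_hidden_layers == 12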
| 354 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar('''KEY''')
lowerCAmelCase__ = TypeVar('''VAL''')
@dataclass(frozen=_UpperCamelCase , slots=_UpperCamelCase )
class snake_case__(Generic[KEY, VAL] ):
"""simple docstring"""
lowercase_ = 42
lowercase_ = 42
class snake_case__(_Item ):
"""simple docstring"""
def __init__( self : List[str] ):
super().__init__(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __bool__( self : Tuple ):
return False
lowerCAmelCase__ = _DeletedItem()
class snake_case__(MutableMapping[KEY, VAL] ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : int = 8 , SCREAMING_SNAKE_CASE : float = 0.75 ):
lowercase__ : Any = initial_block_size
lowercase__ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowercase__ : Dict = capacity_factor
lowercase__ : Optional[int] = 0
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : KEY ):
return hash(SCREAMING_SNAKE_CASE ) % len(self._buckets )
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : int ):
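# Linear probing: step to the next bucket index, wrapping around the table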
return (ind + 1) % len(self._buckets )
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : KEY , SCREAMING_SNAKE_CASE : VAL ):
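# Try to place (key, val) at bucket ind: succeeds on an empty/deleted slot or a matching key, fails on a collision with another key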
lowercase__ : Tuple = self._buckets[ind]
if not stored:
lowercase__ : int = _Item(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self._len += 1
return True
elif stored.key == key:
lowercase__ : str = _Item(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return True
else:
return False
def snake_case ( self : str ):
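# The table counts as full once the load factor reaches capacity_factor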
lowercase__ : str = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] ):
if len(self._buckets ) <= self._initial_block_size:
return False
lowercase__ : Optional[Any] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : int ):
lowercase__ : Tuple = self._buckets
lowercase__ : Optional[int] = [None] * new_size
lowercase__ : int = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def snake_case ( self : int ):
self._resize(len(self._buckets ) * 2 )
def snake_case ( self : Optional[Any] ):
self._resize(len(self._buckets ) // 2 )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : KEY ):
lowercase__ : Tuple = self._get_bucket_index(SCREAMING_SNAKE_CASE )
for _ in range(len(self._buckets ) ):
yield ind
lowercase__ : Union[str, Any] = self._get_next_ind(SCREAMING_SNAKE_CASE )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : KEY , SCREAMING_SNAKE_CASE : VAL ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE ):
if self._try_set(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
break
def __setitem__( self : List[str] , SCREAMING_SNAKE_CASE : KEY , SCREAMING_SNAKE_CASE : VAL ):
if self._is_full():
self._size_up()
self._add_item(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __delitem__( self : int , SCREAMING_SNAKE_CASE : KEY ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE ):
lowercase__ : Union[str, Any] = self._buckets[ind]
if item is None:
raise KeyError(SCREAMING_SNAKE_CASE )
if item is _deleted:
continue
if item.key == key:
lowercase__ : Optional[int] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : Tuple , SCREAMING_SNAKE_CASE : KEY ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE ):
lowercase__ : Any = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(SCREAMING_SNAKE_CASE )
def __len__( self : Optional[Any] ):
return self._len
def __iter__( self : List[str] ):
yield from (item.key for item in self._buckets if item)
def __repr__( self : str ):
lowercase__ : int = ", ".join(
f"""{item.key}: {item.val}""" for item in self._buckets if item )
return f"""HashMap({val_string})"""
| 121 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__lowercase : str = logging.get_logger(__name__)
__lowercase : List[Any] = '▁'
__lowercase : List[Any] = {'vocab_file': 'sentencepiece.bpe.model'}
__lowercase : Any = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
}
}
__lowercase : List[Any] = {
'facebook/mbart-large-en-ro': 10_24,
'facebook/mbart-large-cc25': 10_24,
}
# fmt: off
__lowercase : List[Any] = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = ["input_ids", "attention_mask"]
A_ = []
A_ = []
def __init__( self , __a , __a="<s>" , __a="</s>" , __a="</s>" , __a="<s>" , __a="<unk>" , __a="<pad>" , __a="<mask>" , __a=None , __a=None , __a=None , __a = None , __a=None , **__a , ):
'''simple docstring'''
__a : Tuple = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
__a : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , tokenizer_file=__a , src_lang=__a , tgt_lang=__a , additional_special_tokens=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
__a : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__a ) )
__a : int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
__a : Tuple = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__a : str = 1
__a : Any = len(self.sp_model )
__a : Dict = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__a )
}
__a : Optional[int] = {v: k for k, v in self.lang_code_to_id.items()}
__a : Tuple = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
__a : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
__a : List[str] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
__a : Dict = src_lang if src_lang is not None else 'en_XX'
__a : int = self.lang_code_to_id[self._src_lang]
__a : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
'''simple docstring'''
__a : int = self.__dict__.copy()
__a : str = None
__a : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __a ):
'''simple docstring'''
__a : Any = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__a : Tuple = {}
__a : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : Optional[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCAmelCase ( self , __a , __a = None , __a = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
__a : int = [1] * len(self.prefix_tokens )
__a : List[Any] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__a )) + suffix_ones
return prefix_ones + ([0] * len(__a )) + ([0] * len(__a )) + suffix_ones
def __UpperCAmelCase ( self , __a , __a = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCAmelCase ( self , __a , __a = None ):
'''simple docstring'''
__a : List[str] = [self.sep_token_id]
__a : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self , __a , __a , __a , __a , **__a ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__a : Dict = src_lang
__a : List[Any] = self(__a , add_special_tokens=__a , return_tensors=__a , **__a )
__a : Any = self.convert_tokens_to_ids(__a )
__a : Any = tgt_lang_id
return inputs
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
return self.sp_model.encode(__a , out_type=__a )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__a : Dict = self.sp_model.PieceToId(__a )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : List[str] = ''.join(__a ).replace(__a , ' ' ).strip()
return out_string
def __UpperCAmelCase ( self , __a , __a = None ):
'''simple docstring'''
if not os.path.isdir(__a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__a : Dict = os.path.join(
__a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __a )
elif not os.path.isfile(self.vocab_file ):
with open(__a , 'wb' ) as fi:
__a : int = self.sp_model.serialized_model_proto()
fi.write(__a )
return (out_vocab_file,)
def __UpperCAmelCase ( self , __a , __a = "en_XX" , __a = None , __a = "ro_RO" , **__a , ):
'''simple docstring'''
__a : List[str] = src_lang
__a : str = tgt_lang
return super().prepare_seqaseq_batch(__a , __a , **__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : int = self.lang_code_to_id[src_lang]
__a : List[Any] = []
__a : Dict = [self.eos_token_id, self.cur_lang_code]
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : Optional[Any] = self.lang_code_to_id[lang]
__a : int = []
__a : int = [self.eos_token_id, self.cur_lang_code]
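# Illustrative usage sketch (an addition; names follow transformers' MBartTokenizer,
# since the definitions above are obfuscated):
# tok = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
# ids = tok("UN Chief says there is no military solution in Syria").input_ids
# # per set_src_lang_special_tokens above: prefix_tokens == [] and
# # suffix_tokens == [eos, lang_code], so ids ends with [</s>, en_XX].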
| 27 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __magic_name__ :
def __init__( self : Tuple , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int=13 , lowerCamelCase__ : Union[str, Any]=30 , lowerCamelCase__ : Union[str, Any]=2 , lowerCamelCase__ : List[Any]=3 , lowerCamelCase__ : str=True , lowerCamelCase__ : str=True , lowerCamelCase__ : Dict=32 , lowerCamelCase__ : str=5 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Any=37 , lowerCamelCase__ : Optional[Any]="gelu" , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : Dict=0.1 , lowerCamelCase__ : Tuple=10 , lowerCamelCase__ : List[Any]=0.02 , lowerCamelCase__ : List[Any]=3 , lowerCamelCase__ : str=0.6 , lowerCamelCase__ : int=None , ) -> Dict:
'''simple docstring'''
UpperCamelCase__ : Any = parent
UpperCamelCase__ : List[str] = batch_size
UpperCamelCase__ : List[Any] = image_size
UpperCamelCase__ : str = patch_size
UpperCamelCase__ : List[str] = num_channels
UpperCamelCase__ : int = is_training
UpperCamelCase__ : Dict = use_labels
UpperCamelCase__ : int = hidden_size
UpperCamelCase__ : Union[str, Any] = num_hidden_layers
UpperCamelCase__ : Tuple = num_attention_heads
UpperCamelCase__ : Union[str, Any] = intermediate_size
UpperCamelCase__ : Dict = hidden_act
UpperCamelCase__ : str = hidden_dropout_prob
UpperCamelCase__ : Tuple = attention_probs_dropout_prob
UpperCamelCase__ : Union[str, Any] = type_sequence_label_size
UpperCamelCase__ : str = initializer_range
UpperCamelCase__ : str = mask_ratio
UpperCamelCase__ : Tuple = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCamelCase__ : Optional[int] = (image_size // patch_size) ** 2
UpperCamelCase__ : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ : List[str] = None
if self.use_labels:
UpperCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ : Any = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self : Dict ) -> List[str]:
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def UpperCAmelCase__ ( self : Tuple , lowerCamelCase__ : Dict , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ : List[Any] = ViTMAEModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ : List[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : int , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any] ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ : Tuple = ViTMAEForPreTraining(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ : List[str] = model(lowerCamelCase__ )
UpperCamelCase__ : int = (self.image_size // self.patch_size) ** 2
UpperCamelCase__ : Optional[int] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCamelCase__ : List[Any] = 1
UpperCamelCase__ : int = ViTMAEForPreTraining(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ : Any = model(lowerCamelCase__ )
UpperCamelCase__ : Optional[Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
UpperCamelCase__ : Any = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = config_and_inputs
UpperCamelCase__ : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase):
A: Optional[Any] = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
A: Union[str, Any] = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
A: Any = False
A: str = False
A: Optional[int] = False
A: Any = False
def UpperCAmelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
UpperCamelCase__ : Optional[int] = ViTMAEModelTester(self )
UpperCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def UpperCAmelCase__ ( self : Tuple ) -> str:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : List[str] ) -> int:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[Any] = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def UpperCAmelCase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[Any] = model_class(lowerCamelCase__ )
UpperCamelCase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : Optional[int] = [*signature.parameters.keys()]
UpperCamelCase__ : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def UpperCAmelCase__ ( self : Dict ) -> str:
'''simple docstring'''
UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def UpperCAmelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase__ )
def UpperCAmelCase__ ( self : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] ) -> Tuple:
'''simple docstring'''
np.random.seed(2 )
UpperCamelCase__ : List[str] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCamelCase__ : Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCamelCase__ : Optional[Any] = torch.from_numpy(lowerCamelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase__ : Union[str, Any] = pt_noise
super().check_pt_tf_models(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Tuple = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCamelCase__ : Tuple = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
UpperCamelCase__ : int = outputs[0].cpu().numpy()
UpperCamelCase__ : Dict = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ : Any = model_class.from_pretrained(lowerCamelCase__ )
model.to(lowerCamelCase__ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCamelCase__ : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
# Make sure we don't have nans
UpperCamelCase__ : Union[str, Any] = after_outputs[0].cpu().numpy()
UpperCamelCase__ : Optional[Any] = 0
UpperCamelCase__ : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase__ , 1E-5 )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def UpperCAmelCase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCAmelCase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
@slow
def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Dict = ViTMAEModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def _a ( ):
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
@cached_property
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def UpperCAmelCase__ ( self : str ) -> Any:
'''simple docstring'''
np.random.seed(2 )
UpperCamelCase__ : Dict = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ).to(lowerCamelCase__ )
UpperCamelCase__ : Optional[Any] = self.default_image_processor
UpperCamelCase__ : int = prepare_img()
UpperCamelCase__ : str = image_processor(images=lowerCamelCase__ , return_tensors='''pt''' ).to(lowerCamelCase__ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase__ : Tuple = ViTMAEConfig()
UpperCamelCase__ : List[Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCamelCase__ : Dict = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCamelCase__ : List[Any] = model(**lowerCamelCase__ , noise=torch.from_numpy(lowerCamelCase__ ).to(device=lowerCamelCase__ ) )
# verify the logits
UpperCamelCase__ : Optional[Any] = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
UpperCamelCase__ : Dict = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(lowerCamelCase__ ) , atol=1E-4 ) )
| 146 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_megatron_bert""": ["""MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegatronBertConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_megatron_bert"""] = [
"""MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegatronBertForCausalLM""",
"""MegatronBertForMaskedLM""",
"""MegatronBertForMultipleChoice""",
"""MegatronBertForNextSentencePrediction""",
"""MegatronBertForPreTraining""",
"""MegatronBertForQuestionAnswering""",
"""MegatronBertForSequenceClassification""",
"""MegatronBertForTokenClassification""",
"""MegatronBertModel""",
"""MegatronBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 370 |
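The `__init__.py` above relies on transformers' lazy-import machinery: `_import_structure` maps each submodule to the names it exports, and `_LazyModule` only imports a submodule the first time one of those names is accessed. A minimal sketch of that mechanism (the class name `_SimpleLazyModule` is invented here; the real `_LazyModule` additionally handles module specs, extras and `TYPE_CHECKING`):

import importlib
import types

class _SimpleLazyModule(types.ModuleType):
    # Maps exported symbol -> submodule; the submodule is imported on first attribute access.
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._symbol_to_module = {
            symbol: submodule for submodule, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, item):
        if item not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")
        submodule = importlib.import_module("." + self._symbol_to_module[item], self.__name__)
        value = getattr(submodule, item)
        setattr(self, item, value)  # cache so later lookups skip __getattr__
        return value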
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
UpperCamelCase__ = logging.getLogger(__name__)
class a__ :
def __init__( self ):
"""simple docstring"""
__lowerCAmelCase = False
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A ):
"""simple docstring"""
if not self.initialized:
__lowerCAmelCase = RagRetriever(
_A , question_encoder_tokenizer=_A , generator_tokenizer=_A , index=_A , init_retrieval=_A , )
__lowerCAmelCase = True
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
self.retriever.index.init_index()
def __SCREAMING_SNAKE_CASE( self , _A , _A ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.retriever._main_retrieve(_A , _A )
return doc_ids, retrieved_doc_embeds
class a__ ( snake_case__ ):
def __init__( self , _A , _A , _A , _A , _A=None ):
"""simple docstring"""
if index is not None and index.is_initialized() and len(_A ) > 0:
raise ValueError(
"When using Ray for distributed fine-tuning, "
"you'll need to provide the paths instead, "
"as the dataset and the index are loaded "
"separately. More info in examples/rag/use_own_knowledge_dataset.py " )
super().__init__(
_A , question_encoder_tokenizer=_A , generator_tokenizer=_A , index=_A , init_retrieval=_A , )
__lowerCAmelCase = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(_A , _A , _A , _A )
for worker in self.retrieval_workers
] )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
logger.info("initializing retrieval" )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def __SCREAMING_SNAKE_CASE( self , _A , _A ):
"""simple docstring"""
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
__lowerCAmelCase = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
__lowerCAmelCase , __lowerCAmelCase = ray.get(random_worker.retrieve.remote(_A , _A ) )
else:
__lowerCAmelCase , __lowerCAmelCase = self._main_retrieve(_A , _A )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_A )
@classmethod
def __SCREAMING_SNAKE_CASE( cls , _A , _A=None , **_A ):
"""simple docstring"""
return super(_A , cls ).get_tokenizers(_A , _A , **_A )
@classmethod
def __SCREAMING_SNAKE_CASE( cls , _A , _A , _A=None , **_A ):
"""simple docstring"""
__lowerCAmelCase = kwargs.pop("config" , _A ) or RagConfig.from_pretrained(_A , **_A )
__lowerCAmelCase = RagTokenizer.from_pretrained(_A , config=_A )
__lowerCAmelCase = rag_tokenizer.question_encoder
__lowerCAmelCase = rag_tokenizer.generator
if indexed_dataset is not None:
__lowerCAmelCase = "custom"
__lowerCAmelCase = CustomHFIndex(config.retrieval_vector_size , _A )
else:
__lowerCAmelCase = cls._build_index(_A )
return cls(
_A , question_encoder_tokenizer=_A , generator_tokenizer=_A , retrieval_workers=_A , index=_A , )
| 102 | 0 |
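The distributed retriever above dispatches each query to a randomly chosen Ray actor. A stripped-down sketch of that dispatch pattern, with a placeholder actor and payload (not the real RagRayDistributedRetriever API):

import random

import ray

@ray.remote
class RetrievalWorker:
    # placeholder actor standing in for the RayRetriever actors above
    def retrieve(self, question_ids, n_docs):
        return f"top-{n_docs} docs for {question_ids}"

ray.init(ignore_reinit_error=True)
workers = [RetrievalWorker.remote() for _ in range(4)]
worker = workers[random.randint(0, len(workers) - 1)]  # select a random retrieval actor
print(ray.get(worker.retrieve.remote([1, 2, 3], n_docs=5)))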
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def _SCREAMING_SNAKE_CASE () -> Optional[Any]:
'''simple docstring'''
lowercase_ = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=__lowerCAmelCase , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=__lowerCAmelCase , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=__lowerCAmelCase )
return parser.parse_args()
def _SCREAMING_SNAKE_CASE () -> Any:
'''simple docstring'''
lowercase_ = parse_args()
# Import training_script as a module.
lowercase_ = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
lowercase_ = script_fpath.stem
lowercase_ = importlib.import_module(__lowerCAmelCase )
# Patch sys.argv
lowercase_ = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 136 |
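For orientation, here is how the launcher above wires arguments through; the script name and the flags after it are placeholders, and only `--num_cores` belongs to the launcher itself:

# Hypothetical invocation:
#   python xla_spawn.py --num_cores 8 run_glue.py --output_dir /tmp/out
# parse_args() then yields:
#   args.num_cores            == 8
#   args.training_script      == "run_glue.py"
#   args.training_script_args == ["--output_dir", "/tmp/out"]
# and main() patches sys.argv for the imported module to:
#   ["run_glue.py", "--output_dir", "/tmp/out", "--tpu_num_cores", "8"]
# before xmp.spawn launches mod._mp_fn on each of the 8 cores.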
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase : int = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "encodec"
def __init__( self : Union[str, Any] , lowerCAmelCase_ : Tuple=[1.5, 3.0, 6.0, 12.0, 24.0] , lowerCAmelCase_ : Tuple=2_4_0_0_0 , lowerCAmelCase_ : List[str]=1 , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Dict=1_2_8 , lowerCAmelCase_ : Dict=3_2 , lowerCAmelCase_ : Tuple=1 , lowerCAmelCase_ : Dict=[8, 5, 4, 2] , lowerCAmelCase_ : Optional[Any]="weight_norm" , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : int=7 , lowerCAmelCase_ : str=3 , lowerCAmelCase_ : Optional[int]=2 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : int="reflect" , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Optional[int]=2 , lowerCAmelCase_ : List[Any]=1.0 , lowerCAmelCase_ : Dict=1_0_2_4 , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Optional[int]=True , **lowerCAmelCase_ : List[str] , ):
"""simple docstring"""
lowercase_ = target_bandwidths
lowercase_ = sampling_rate
lowercase_ = audio_channels
lowercase_ = normalize
lowercase_ = chunk_length_s
lowercase_ = overlap
lowercase_ = hidden_size
lowercase_ = num_filters
lowercase_ = num_residual_layers
lowercase_ = upsampling_ratios
lowercase_ = norm_type
lowercase_ = kernel_size
lowercase_ = last_kernel_size
lowercase_ = residual_kernel_size
lowercase_ = dilation_growth_rate
lowercase_ = use_causal_conv
lowercase_ = pad_mode
lowercase_ = compress
lowercase_ = num_lstm_layers
lowercase_ = trim_right_ratio
lowercase_ = codebook_size
lowercase_ = codebook_dim if codebook_dim is not None else hidden_size
lowercase_ = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}''')
super().__init__(**lowerCAmelCase_)
@property
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate)
@property
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length))
@property
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = np.prod(self.upsampling_ratios)
return math.ceil(self.sampling_rate / hop_length)
@property
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
return int(1_0_0_0 * self.target_bandwidths[-1] // (self.frame_rate * 1_0))
| 136 | 1 |
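A quick numeric check of the derived properties in the config above, using the defaults shown (24 kHz audio, upsampling ratios [8, 5, 4, 2]); the arithmetic mirrors the property bodies exactly:

import math

import numpy as np

sampling_rate = 24_000
upsampling_ratios = [8, 5, 4, 2]
target_bandwidths = [1.5, 3.0, 6.0, 12.0, 24.0]

hop_length = int(np.prod(upsampling_ratios))        # 8 * 5 * 4 * 2 = 320 samples per frame
frame_rate = math.ceil(sampling_rate / hop_length)  # ceil(24000 / 320) = 75 frames per second
num_quantizers = int(1_000 * target_bandwidths[-1] // (frame_rate * 10))  # 24000 // 750 = 32

print(hop_length, frame_rate, num_quantizers)  # 320 75 32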
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ =logging.get_logger(__name__)
lowercase__ ={
'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class UpperCamelCase__ ( __lowercase ):
_SCREAMING_SNAKE_CASE : int = "donut-swin"
_SCREAMING_SNAKE_CASE : Dict = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__(self : Optional[int] , snake_case_ : Tuple=2_2_4 , snake_case_ : Any=4 , snake_case_ : Optional[Any]=3 , snake_case_ : Optional[Any]=9_6 , snake_case_ : str=[2, 2, 6, 2] , snake_case_ : Optional[Any]=[3, 6, 1_2, 2_4] , snake_case_ : Optional[int]=7 , snake_case_ : List[str]=4.0 , snake_case_ : Optional[int]=True , snake_case_ : str=0.0 , snake_case_ : int=0.0 , snake_case_ : Union[str, Any]=0.1 , snake_case_ : Tuple="gelu" , snake_case_ : Optional[Any]=False , snake_case_ : Tuple=0.02 , snake_case_ : List[Any]=1E-5 , **snake_case_ : Optional[Any] , ):
super().__init__(**snake_case_ )
__a : Optional[int] = image_size
__a : Tuple = patch_size
__a : Tuple = num_channels
__a : List[Any] = embed_dim
__a : List[Any] = depths
__a : Dict = len(snake_case_ )
__a : Union[str, Any] = num_heads
__a : Optional[Any] = window_size
__a : Dict = mlp_ratio
__a : Dict = qkv_bias
__a : List[str] = hidden_dropout_prob
__a : Any = attention_probs_dropout_prob
__a : Dict = drop_path_rate
__a : int = hidden_act
__a : Any = use_absolute_embeddings
__a : Optional[Any] = layer_norm_eps
__a : str = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__a : str = int(embed_dim * 2 ** (len(snake_case_ ) - 1) )
| 352 |
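As a sanity check of the last line of the constructor above: the channel dimension doubles at each stage transition, so with the default embed_dim of 96 and four stages the final hidden size comes out to 768:

embed_dim = 96
depths = [2, 2, 6, 2]  # four stages
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
print(hidden_size)  # 96 * 2**3 = 768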
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_layoutlmv2_fast'] = ['LayoutLMv2TokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['feature_extraction_layoutlmv2'] = ['LayoutLMv2FeatureExtractor']
_import_structure['image_processing_layoutlmv2'] = ['LayoutLMv2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_layoutlmv2'] = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 90 | 0 |
"""simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
lowercase__ = namedtuple("""covid_data""", """cases deaths recovered""")
def _snake_case ( lowercase__ = "https://www.worldometers.info/coronavirus/" ):
_lowerCamelCase : Any = '//div[@class = "maincounter-number"]/span/text()'
return covid_data(*html.fromstring(requests.get(lowercase__ ).content ).xpath(lowercase__ ) )
lowercase__ = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats())) | 96 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
_UpperCamelCase = logging.get_logger(__name__)
def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
def constraint_to_multiple_of(lowercase__ , lowercase__ , lowercase__=0 , lowercase__=None ):
__lowerCAmelCase : int = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
__lowerCAmelCase : Optional[int] = math.floor(val / multiple ) * multiple
if x < min_val:
__lowerCAmelCase : Any = math.ceil(val / multiple ) * multiple
return x
__lowerCAmelCase : Dict = (output_size, output_size) if isinstance(lowercase__ , lowercase__ ) else output_size
__lowerCAmelCase, __lowerCAmelCase : Optional[Any] = get_image_size(lowercase__ )
__lowerCAmelCase, __lowerCAmelCase : int = output_size
# determine new height and width
__lowerCAmelCase : Optional[Any] = output_height / input_height
__lowerCAmelCase : List[Any] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
__lowerCAmelCase : str = scale_width
else:
# fit height
__lowerCAmelCase : str = scale_height
__lowerCAmelCase : Any = constraint_to_multiple_of(scale_height * input_height , multiple=lowercase__ )
__lowerCAmelCase : Union[str, Any] = constraint_to_multiple_of(scale_width * input_width , multiple=lowercase__ )
return (new_height, new_width)
class __lowercase (_UpperCAmelCase ):
_UpperCamelCase = ["""pixel_values"""]
def __init__( self , A_ = True , A_ = None , A_ = PILImageResampling.BILINEAR , A_ = False , A_ = 1 , A_ = True , A_ = 1 / 255 , A_ = True , A_ = None , A_ = None , **A_ , ) ->None:
'''simple docstring'''
super().__init__(**A_ )
__lowerCAmelCase : Union[str, Any] = size if size is not None else {'''height''': 384, '''width''': 384}
__lowerCAmelCase : Dict = get_size_dict(A_ )
__lowerCAmelCase : Optional[Any] = do_resize
__lowerCAmelCase : int = size
__lowerCAmelCase : Dict = keep_aspect_ratio
__lowerCAmelCase : List[Any] = ensure_multiple_of
__lowerCAmelCase : Tuple = resample
__lowerCAmelCase : Dict = do_rescale
__lowerCAmelCase : Any = rescale_factor
__lowerCAmelCase : List[Any] = do_normalize
__lowerCAmelCase : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowerCAmelCase : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self , A_ , A_ , A_ = False , A_ = 1 , A_ = PILImageResampling.BICUBIC , A_ = None , **A_ , ) ->np.ndarray:
'''simple docstring'''
__lowerCAmelCase : int = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
__lowerCAmelCase : Union[str, Any] = get_resize_output_image_size(
A_ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=A_ , multiple=A_ , )
return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ )
def UpperCamelCase__ ( self , A_ , A_ , A_ = None , **A_ , ) ->Dict:
'''simple docstring'''
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ = None , **A_ , ) ->np.ndarray:
'''simple docstring'''
return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ )
def UpperCamelCase__ ( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ) ->PIL.Image.Image:
'''simple docstring'''
__lowerCAmelCase : int = do_resize if do_resize is not None else self.do_resize
__lowerCAmelCase : Optional[int] = size if size is not None else self.size
__lowerCAmelCase : Union[str, Any] = get_size_dict(A_ )
__lowerCAmelCase : List[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
__lowerCAmelCase : Optional[int] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
__lowerCAmelCase : Tuple = resample if resample is not None else self.resample
__lowerCAmelCase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
__lowerCAmelCase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCAmelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
__lowerCAmelCase : str = image_mean if image_mean is not None else self.image_mean
__lowerCAmelCase : Optional[Any] = image_std if image_std is not None else self.image_std
__lowerCAmelCase : Optional[Any] = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__lowerCAmelCase : Any = [to_numpy_array(A_ ) for image in images]
if do_resize:
__lowerCAmelCase : Optional[Any] = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]
if do_rescale:
__lowerCAmelCase : Tuple = [self.rescale(image=A_ , scale=A_ ) for image in images]
if do_normalize:
__lowerCAmelCase : str = [self.normalize(image=A_ , mean=A_ , std=A_ ) for image in images]
__lowerCAmelCase : Union[str, Any] = [to_channel_dimension_format(A_ , A_ ) for image in images]
__lowerCAmelCase : Dict = {'''pixel_values''': images}
return BatchFeature(data=A_ , tensor_type=A_ )
def UpperCamelCase__ ( self , A_ , A_ = None ) ->Any:
'''simple docstring'''
__lowerCAmelCase : Any = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A_ ) != len(A_ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(A_ ):
__lowerCAmelCase : Optional[int] = target_sizes.numpy()
__lowerCAmelCase : List[str] = []
for idx in range(len(A_ ) ):
__lowerCAmelCase : Any = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=A_ )
__lowerCAmelCase : str = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(A_ )
else:
__lowerCAmelCase : Any = logits.argmax(dim=1 )
__lowerCAmelCase : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 275 | 0 |
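The sizing rule implemented by constraint_to_multiple_of and get_resize_output_image_size earlier in this block is easiest to follow with concrete numbers. A self-contained sketch of the same logic, assuming a multiple of 32 as is typical for DPT-style backbones:

import math

def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
    # round to the nearest multiple, then push back inside [min_val, max_val]
    x = round(val / multiple) * multiple
    if max_val is not None and x > max_val:
        x = math.floor(val / multiple) * multiple
    if x < min_val:
        x = math.ceil(val / multiple) * multiple
    return x

# a 480x640 (h x w) image resized toward 384x384, keeping aspect ratio:
scale_height, scale_width = 384 / 480, 384 / 640
# "scale as little as possible": pick whichever factor is closer to 1
scale = scale_width if abs(1 - scale_width) < abs(1 - scale_height) else scale_height
new_height = constraint_to_multiple_of(scale * 480, 32)  # 384
new_width = constraint_to_multiple_of(scale * 640, 32)   # 512
print(new_height, new_width)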
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __lowerCAmelCase ():
_UpperCAmelCase : Dict = ArgumentParser(
description=(
"PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
) )
# Optional arguments for the launch helper
parser.add_argument("--num_cores" , type=__lowerCAmelCase , default=1 , help="Number of TPU cores to use (1 or 8)." )
# positional
parser.add_argument(
"training_script" , type=__lowerCAmelCase , help=(
"The full path to the single TPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script"
) , )
# rest from the training program
parser.add_argument("training_script_args" , nargs=__lowerCAmelCase )
return parser.parse_args()
def __lowerCAmelCase ():
_UpperCAmelCase : List[Any] = parse_args()
# Import training_script as a module.
_UpperCAmelCase : str = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
_UpperCAmelCase : Any = script_fpath.stem
_UpperCAmelCase : List[str] = importlib.import_module(__lowerCAmelCase )
# Patch sys.argv
_UpperCAmelCase : Optional[int] = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 354 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowerCamelCase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : int ) ->str:
'''simple docstring'''
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : Union[str, Any] = [label.strip() for label in labels.split("," ) if label.strip()]
return labels
def __call__( self : Union[str, Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] ) ->str:
'''simple docstring'''
if len(lowerCamelCase__ ) == 0 or len(lowerCamelCase__ ) == 0:
raise ValueError("You must include at least one label and at least one sequence." )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. "
"Make sure the passed template includes formatting syntax such as {{}} where the label should go."
).format(lowerCamelCase__ ) )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : Optional[Any] = [sequences]
_UpperCAmelCase : int = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(lowerCamelCase__ )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(UpperCAmelCase__ )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Union[str, Any] , lowerCamelCase__ : Optional[Any]=ZeroShotClassificationArgumentHandler() , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : Any ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = args_parser
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
if self.entailment_id == -1:
logger.warning(
"Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
"-1. Define a descriptive label2id mapping in the model config to ensure correct outputs." )
@property
def lowerCAmelCase__ ( self : Any ) ->Union[str, Any]:
'''simple docstring'''
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("entail" ):
return ind
return -1
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : int=True , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : str=TruncationStrategy.ONLY_FIRST , **lowerCamelCase__ : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : int = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
" `pad_token=eos_token`" )
_UpperCAmelCase : Optional[Any] = self.tokenizer.eos_token
try:
_UpperCAmelCase : List[str] = self.tokenizer(
lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , )
except Exception as e:
if "too short" in str(lowerCamelCase__ ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
_UpperCAmelCase : List[Any] = self.tokenizer(
lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def lowerCAmelCase__ ( self : int , **lowerCamelCase__ : Union[str, Any] ) ->Tuple:
'''simple docstring'''
if kwargs.get("multi_class" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : int = kwargs["multi_class"]
logger.warning(
"The `multi_class` argument has been deprecated and renamed to `multi_label`. "
"`multi_class` will be removed in a future version of Transformers." )
_UpperCAmelCase : Dict = {}
if "candidate_labels" in kwargs:
_UpperCAmelCase : List[Any] = self._args_parser._parse_labels(kwargs["candidate_labels"] )
if "hypothesis_template" in kwargs:
_UpperCAmelCase : Dict = kwargs["hypothesis_template"]
_UpperCAmelCase : List[str] = {}
if "multi_label" in kwargs:
_UpperCAmelCase : Optional[Any] = kwargs["multi_label"]
return preprocess_params, {}, postprocess_params
def __call__( self : int , lowerCamelCase__ : Union[str, List[str]] , *lowerCamelCase__ : str , **lowerCamelCase__ : Optional[Any] , ) ->Optional[int]:
'''simple docstring'''
if len(lowerCamelCase__ ) == 0:
pass
elif len(lowerCamelCase__ ) == 1 and "candidate_labels" not in kwargs:
_UpperCAmelCase : int = args[0]
else:
raise ValueError(F"""Unable to understand extra arguments {args}""" )
return super().__call__(lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Any=None , lowerCamelCase__ : str="This example is {}." ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self._args_parser(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
for i, (candidate_label, sequence_pair) in enumerate(zip(lowerCamelCase__ , lowerCamelCase__ ) ):
_UpperCAmelCase : Optional[int] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(lowerCamelCase__ ) - 1,
**model_input,
}
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : Optional[int] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Dict = inputs["candidate_label"]
_UpperCAmelCase : Optional[int] = inputs["sequence"]
_UpperCAmelCase : Dict = {k: inputs[k] for k in self.tokenizer.model_input_names}
_UpperCAmelCase : List[Any] = self.model(**lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = {
"candidate_label": candidate_label,
"sequence": sequence,
"is_last": inputs["is_last"],
**outputs,
}
return model_outputs
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Tuple=False ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Any = [outputs["candidate_label"] for outputs in model_outputs]
_UpperCAmelCase : Any = [outputs["sequence"] for outputs in model_outputs]
_UpperCAmelCase : Optional[int] = np.concatenate([output["logits"].numpy() for output in model_outputs] )
_UpperCAmelCase : Optional[Any] = logits.shape[0]
_UpperCAmelCase : Any = len(lowerCamelCase__ )
_UpperCAmelCase : str = N // n
_UpperCAmelCase : str = logits.reshape((num_sequences, n, -1) )
if multi_label or len(lowerCamelCase__ ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
_UpperCAmelCase : int = self.entailment_id
_UpperCAmelCase : List[Any] = -1 if entailment_id == 0 else 0
_UpperCAmelCase : str = reshaped_outputs[..., [contradiction_id, entailment_id]]
_UpperCAmelCase : Union[str, Any] = np.exp(lowerCamelCase__ ) / np.exp(lowerCamelCase__ ).sum(-1 , keepdims=lowerCamelCase__ )
_UpperCAmelCase : str = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
_UpperCAmelCase : int = reshaped_outputs[..., self.entailment_id]
_UpperCAmelCase : Union[str, Any] = np.exp(lowerCamelCase__ ) / np.exp(lowerCamelCase__ ).sum(-1 , keepdims=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
| 322 | 0 |
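The two branches of the postprocess method above are just a softmax arranged two ways; a tiny numeric sketch with made-up logits shows the difference. In the single-label branch the entailment logits compete across labels, while in the multi-label branch each label is scored independently against its own contradiction logit:

import numpy as np

# hypothetical entailment logits for 3 candidate labels on one sequence
entail_logits = np.array([[2.0, 0.5, -1.0]])

# single-label: probabilities over labels sum to 1
probs = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
print(probs.round(3))  # [[0.786 0.175 0.039]]

# multi-label: softmax over (contradiction, entailment) per label; scores need not sum to 1
contra_entail = np.array([[[0.1, 2.0], [0.3, 0.5], [1.5, -1.0]]])
pair_probs = np.exp(contra_entail) / np.exp(contra_entail).sum(-1, keepdims=True)
print(pair_probs[..., 1].round(2))  # entailment probability per label: [[0.87 0.55 0.08]]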
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
def UpperCamelCase_( snake_case : Union[str, Any] , snake_case : Optional[Any]=False ):
'''simple docstring'''
snake_case_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case_ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def UpperCamelCase_( snake_case : Any , snake_case : Tuple , snake_case : str=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
snake_case_ = ""
else:
snake_case_ = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case_ = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
snake_case_ = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
snake_case_ = in_proj_weight[
: config.hidden_size, :
]
snake_case_ = in_proj_bias[: config.hidden_size]
snake_case_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case_ = in_proj_weight[
-config.hidden_size :, :
]
snake_case_ = in_proj_bias[-config.hidden_size :]
def UpperCamelCase_( snake_case : Tuple ):
'''simple docstring'''
snake_case_ = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(snake_case , snake_case )
def UpperCamelCase_( snake_case : List[str] , snake_case : Union[str, Any] , snake_case : int ):
'''simple docstring'''
snake_case_ = dct.pop(snake_case )
snake_case_ = val
def UpperCamelCase_( ):
'''simple docstring'''
snake_case_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
snake_case_ = Image.open(requests.get(snake_case , stream=snake_case ).raw )
return im
@torch.no_grad()
def UpperCamelCase_( snake_case : Optional[Any] , snake_case : Any , snake_case : str=True ):
'''simple docstring'''
snake_case_ = ViTConfig()
# patch_size
if model_name[-1] == "8":
snake_case_ = 8
# set labels if required
if not base_model:
snake_case_ = 1_0_0_0
snake_case_ = "huggingface/label-files"
snake_case_ = "imagenet-1k-id2label.json"
snake_case_ = json.load(open(hf_hub_download(snake_case , snake_case , repo_type="dataset" ) , "r" ) )
snake_case_ = {int(k ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
snake_case_ = 3_8_4
snake_case_ = 1_5_3_6
snake_case_ = 1_2
snake_case_ = 6
# load original model from torch hub
snake_case_ = torch.hub.load("facebookresearch/dino:main" , snake_case )
original_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case_ = original_model.state_dict()
if base_model:
remove_classification_head_(snake_case )
snake_case_ = create_rename_keys(snake_case , base_model=snake_case )
for src, dest in rename_keys:
rename_key(snake_case , snake_case , snake_case )
read_in_q_k_v(snake_case , snake_case , snake_case )
# load HuggingFace model
if base_model:
snake_case_ = ViTModel(snake_case , add_pooling_layer=snake_case ).eval()
else:
snake_case_ = ViTForImageClassification(snake_case ).eval()
model.load_state_dict(snake_case )
# Check outputs on an image, prepared by ViTImageProcessor
snake_case_ = ViTImageProcessor()
snake_case_ = image_processor(images=prepare_img() , return_tensors="pt" )
snake_case_ = encoding["pixel_values"]
snake_case_ = model(snake_case )
if base_model:
snake_case_ = original_model(snake_case )
assert torch.allclose(snake_case , outputs.last_hidden_state[:, 0, :] , atol=1e-1 )
else:
snake_case_ = original_model(snake_case )
assert logits.shape == outputs.logits.shape
assert torch.allclose(snake_case , outputs.logits , atol=1e-3 )
Path(snake_case ).mkdir(exist_ok=snake_case )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="dino_vitb16",
type=str,
help="Name of the model trained with DINO you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--base_model",
action="store_true",
help="Whether to only convert the base model (no projection head weights).",
)
parser.set_defaults(base_model=True)
_SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 85 |
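The least obvious step in the conversion above is read_in_q_k_v, which slices timm's fused qkv projection into the separate query/key/value weights the HuggingFace ViT layout expects. A toy illustration of the same row slicing with a small hidden size:

import torch

hidden_size = 4
# fused projection: 3 * hidden_size output rows (q, then k, then v)
in_proj_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)

query_w = in_proj_weight[:hidden_size, :]                  # rows 0..3
key_w = in_proj_weight[hidden_size : 2 * hidden_size, :]   # rows 4..7
value_w = in_proj_weight[-hidden_size:, :]                 # rows 8..11
assert torch.equal(torch.cat([query_w, key_w, value_w]), in_proj_weight)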
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
_import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_conditional_detr"] = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _A , _A=7 , _A=3 , _A=30 , _A=400 , _A=True , _A=None , _A=True , _A=[0.5, 0.5, 0.5] , _A=[0.5, 0.5, 0.5] , _A=True , _A=1 / 255 , _A=True , ) -> Optional[Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
SCREAMING_SNAKE_CASE_ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = min_resolution
SCREAMING_SNAKE_CASE_ = max_resolution
SCREAMING_SNAKE_CASE_ = do_resize
SCREAMING_SNAKE_CASE_ = size
SCREAMING_SNAKE_CASE_ = do_normalize
SCREAMING_SNAKE_CASE_ = image_mean
SCREAMING_SNAKE_CASE_ = image_std
SCREAMING_SNAKE_CASE_ = do_rescale
SCREAMING_SNAKE_CASE_ = rescale_factor
SCREAMING_SNAKE_CASE_ = do_pad
def _UpperCamelCase ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _UpperCamelCase ( self , _A , _A=False ) -> str:
if not batched:
SCREAMING_SNAKE_CASE_ = image_inputs[0]
if isinstance(_A , Image.Image ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = image.size
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE_ = int(self.size['''shortest_edge'''] * h / w )
SCREAMING_SNAKE_CASE_ = self.size['''shortest_edge''']
elif w > h:
SCREAMING_SNAKE_CASE_ = self.size['''shortest_edge''']
SCREAMING_SNAKE_CASE_ = int(self.size['''shortest_edge'''] * w / h )
else:
SCREAMING_SNAKE_CASE_ = self.size['''shortest_edge''']
SCREAMING_SNAKE_CASE_ = self.size['''shortest_edge''']
else:
SCREAMING_SNAKE_CASE_ = []
for image in image_inputs:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE_ = max(_A , key=lambda _A : item[0] )[0]
SCREAMING_SNAKE_CASE_ = max(_A , key=lambda _A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ =ConditionalDetrImageProcessor if is_vision_available() else None
def _UpperCamelCase ( self ) -> Any:
SCREAMING_SNAKE_CASE_ = ConditionalDetrImageProcessingTester(self )
@property
def _UpperCamelCase ( self ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCamelCase ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''image_mean''' ) )
self.assertTrue(hasattr(_A , '''image_std''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , _A )
SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_A )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , _A )
def _UpperCamelCase ( self ) -> Any:
pass
def _UpperCamelCase ( self ) -> int:
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.image_processor_tester.get_expected_values(_A , batched=_A )
SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _UpperCamelCase ( self ) -> List[Any]:
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _UpperCamelCase ( self ) -> Union[str, Any]:
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _UpperCamelCase ( self ) -> str:
# prepare image and target
SCREAMING_SNAKE_CASE_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
SCREAMING_SNAKE_CASE_ = json.loads(f.read() )
SCREAMING_SNAKE_CASE_ = {'''image_id''': 39769, '''annotations''': target}
# encode them
SCREAMING_SNAKE_CASE_ = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
SCREAMING_SNAKE_CASE_ = image_processing(images=_A , annotations=_A , return_tensors='''pt''' )
# verify pixel values
SCREAMING_SNAKE_CASE_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , _A )
SCREAMING_SNAKE_CASE_ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE_ = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) )
# verify boxes
SCREAMING_SNAKE_CASE_ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A )
SCREAMING_SNAKE_CASE_ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE_ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) )
# verify is_crowd
SCREAMING_SNAKE_CASE_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) )
# verify class_labels
SCREAMING_SNAKE_CASE_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) )
# verify orig_size
SCREAMING_SNAKE_CASE_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) )
# verify size
SCREAMING_SNAKE_CASE_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
@slow
def _UpperCamelCase ( self ) -> Tuple:
# prepare image, target and masks_path
SCREAMING_SNAKE_CASE_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
SCREAMING_SNAKE_CASE_ = json.loads(f.read() )
SCREAMING_SNAKE_CASE_ = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
SCREAMING_SNAKE_CASE_ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
SCREAMING_SNAKE_CASE_ = ConditionalDetrImageProcessor(format='''coco_panoptic''' )
SCREAMING_SNAKE_CASE_ = image_processing(images=_A , annotations=_A , masks_path=_A , return_tensors='''pt''' )
# verify pixel values
SCREAMING_SNAKE_CASE_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , _A )
SCREAMING_SNAKE_CASE_ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE_ = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) )
# verify boxes
SCREAMING_SNAKE_CASE_ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A )
SCREAMING_SNAKE_CASE_ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE_ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) )
# verify is_crowd
SCREAMING_SNAKE_CASE_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) )
# verify class_labels
SCREAMING_SNAKE_CASE_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) )
# verify masks
SCREAMING_SNAKE_CASE_ = 822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _A )
# verify orig_size
SCREAMING_SNAKE_CASE_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) )
# verify size
SCREAMING_SNAKE_CASE_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
| 257 |
def A__ ( __lowerCamelCase ):
return sum(i for i in range(1, number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("Program to check whether a number is a Perfect number or not...")
__UpperCAmelCase = int(input("Enter number: ").strip())
print(F"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 257 | 1 |
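A quick arithmetic check of the predicate above: 28 is perfect because its proper divisors sum back to the number itself.

# proper divisors of 28: 1 + 2 + 4 + 7 + 14 == 28
assert sum(i for i in range(1, 28 // 2 + 1) if 28 % i == 0) == 28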
'''simple docstring'''
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
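# Minimal usage sketch for the tool above. `invoice.png` is a hypothetical
# local file, not something shipped with this module; PipelineTool instances
# are callable, which runs encode -> forward -> decode in sequence.
# from PIL import Image
# tool = DocumentQuestionAnsweringTool()
# image = Image.open("invoice.png")
# print(tool(document=image, question="What is the invoice number?"))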
| 272 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet import FNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet_fast import FNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_fnet import (
            FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FNetForMaskedLM,
            FNetForMultipleChoice,
            FNetForNextSentencePrediction,
            FNetForPreTraining,
            FNetForQuestionAnswering,
            FNetForSequenceClassification,
            FNetForTokenClassification,
            FNetLayer,
            FNetModel,
            FNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 86 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r"""
    [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
    can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.

    Args:
        title_sep (`str`, *optional*, defaults to `" / "`):
            Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
        doc_sep (`str`, *optional*, defaults to `" // "`):
            Separator inserted between the text of the retrieved document and the original input when calling
            [`RagRetriever`].
        n_docs (`int`, *optional*, defaults to 5):
            Number of documents to retrieve.
        max_combined_length (`int`, *optional*, defaults to 300):
            Max length of contextualized input returned by [`~RagRetriever.__call__`].
        retrieval_vector_size (`int`, *optional*, defaults to 768):
            Dimensionality of the document embeddings indexed by [`RagRetriever`].
        retrieval_batch_size (`int`, *optional*, defaults to 8):
            Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated
            [`RagRetriever`].
        dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
            A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
            using `datasets.list_datasets()`).
        dataset_split (`str`, *optional*, defaults to `"train"`):
            Which split of the `dataset` to load.
        index_name (`str`, *optional*, defaults to `"compressed"`):
            The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
            `"compressed"`.
        index_path (`str`, *optional*):
            The path to the serialized faiss index on disk.
        passages_path (`str`, *optional*):
            A path to text passages compatible with the faiss index. Required if using
            [`~models.rag.retrieval_rag.LegacyIndex`]
        use_dummy_dataset (`bool`, *optional*, defaults to `False`):
            Whether to load a "dummy" variant of the dataset specified by `dataset`.
        label_smoothing (`float`, *optional*, defaults to 0.0):
            Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
            in the loss calculation. If set to 0, no label smoothing is performed.
        do_marginalize (`bool`, *optional*, defaults to `False`):
            If `True`, the logits are marginalized over all documents by making use of
            `torch.nn.functional.log_softmax`.
        reduce_loss (`bool`, *optional*, defaults to `False`):
            Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
        do_deduplication (`bool`, *optional*, defaults to `True`):
            Whether or not to deduplicate the generations from different context documents for a given input. Has to be
            set to `False` if used while training with distributed backend.
        exclude_bos_score (`bool`, *optional*, defaults to `False`):
            Whether or not to disregard the BOS token when computing the loss.
        output_retrieved (`bool`, *optional*, defaults to `False`):
            If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
            `context_attention_mask` are returned. See returned tensors for more detail.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        forced_eos_token_id (`int`, *optional*):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.
"""


@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self, vocab_size=None, is_encoder_decoder=True, prefix=None, bos_token_id=None, pad_token_id=None,
        eos_token_id=None, decoder_start_token_id=None, title_sep=" / ", doc_sep=" // ", n_docs=5,
        max_combined_length=300, retrieval_vector_size=768, retrieval_batch_size=8, dataset="wiki_dpr",
        dataset_split="train", index_name="compressed", index_path=None, passages_path=None,
        use_dummy_dataset=False, reduce_loss=False, label_smoothing=0.0, do_deduplication=True,
        exclude_bos_score=False, do_marginalize=False, output_retrieved=False, use_cache=True,
        forced_eos_token_id=None, **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize

        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset

        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 359 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.framework.errors_impl import ResourceExhaustedError

    from .benchmark_args_tf import TensorFlowBenchmarkArguments

if is_py3nvml_available():
    import py3nvml.py3nvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
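# How the decorator factory above is meant to be used (a sketch; `my_model`
# and `my_input` are placeholders, not names defined in this module). The
# returned wrapper either runs the callable eagerly or compiles it with
# `tf.function`, optionally under XLA:
#
# @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
# def forward_pass():
#     return my_model(my_input, training=False)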
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)
    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward

        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train

        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 317 | 0 |
"""simple docstring"""
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """
    Change the brightness of a PIL Image by adding `level` to every channel value.
    """

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Increase brightness by 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
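        # For multiplicative rather than additive adjustment, Pillow also
        # ships a ready-made enhancer; a minimal sketch (a factor of 1.0
        # leaves the image unchanged, values > 1.0 brighten it):
        # from PIL import ImageEnhance
        # brighter = ImageEnhance.Brightness(img).enhance(1.5)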
| 220 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__ ( self : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] ):
"""simple docstring"""
return F"""gaussian_noise_s={seed}_shape={'_'.join([str(lowerCAmelCase_ ) for s in shape] )}.npy"""
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def __magic_name__ ( self : Optional[Any] , lowerCAmelCase_ : Any=0 , lowerCAmelCase_ : Tuple=(4, 4, 6_4, 6_4) , lowerCAmelCase_ : List[str]=False ):
"""simple docstring"""
_A: List[str] = jnp.bfloataa if fpaa else jnp.floataa
_A: Optional[int] = jnp.array(load_hf_numpy(self.get_file_format(lowerCAmelCase_ , lowerCAmelCase_ ) ) , dtype=lowerCAmelCase_ )
return image
def __magic_name__ ( self : Optional[Any] , lowerCAmelCase_ : Optional[int]=False , lowerCAmelCase_ : Optional[Any]="CompVis/stable-diffusion-v1-4" ):
"""simple docstring"""
_A: Tuple = jnp.bfloataa if fpaa else jnp.floataa
_A: str = '''bf16''' if fpaa else None
_A , _A: Union[str, Any] = FlaxUNetaDConditionModel.from_pretrained(
lowerCAmelCase_ , subfolder='''unet''' , dtype=lowerCAmelCase_ , revision=lowerCAmelCase_ )
return model, params
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : Optional[int]=0 , lowerCAmelCase_ : str=(4, 7_7, 7_6_8) , lowerCAmelCase_ : Dict=False ):
"""simple docstring"""
_A: Optional[int] = jnp.bfloataa if fpaa else jnp.floataa
_A: Union[str, Any] = jnp.array(load_hf_numpy(self.get_file_format(lowerCAmelCase_ , lowerCAmelCase_ ) ) , dtype=lowerCAmelCase_ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[1_7, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_0_0_0, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def __magic_name__ ( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any ):
"""simple docstring"""
_A , _A: Optional[Any] = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=lowerCAmelCase_ )
_A: List[str] = self.get_latents(lowerCAmelCase_ , fpaa=lowerCAmelCase_ )
_A: Optional[int] = self.get_encoder_hidden_states(lowerCAmelCase_ , fpaa=lowerCAmelCase_ )
_A: List[str] = model.apply(
{'''params''': params} , lowerCAmelCase_ , jnp.array(lowerCAmelCase_ , dtype=jnp.intaa ) , encoder_hidden_states=lowerCAmelCase_ , ).sample
assert sample.shape == latents.shape
_A: Any = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_A: Tuple = jnp.array(lowerCAmelCase_ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[1_7, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_0_0_0, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def __magic_name__ ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple ):
"""simple docstring"""
_A , _A: Union[str, Any] = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=lowerCAmelCase_ )
_A: Dict = self.get_latents(lowerCAmelCase_ , shape=(4, 4, 9_6, 9_6) , fpaa=lowerCAmelCase_ )
_A: Dict = self.get_encoder_hidden_states(lowerCAmelCase_ , shape=(4, 7_7, 1_0_2_4) , fpaa=lowerCAmelCase_ )
_A: Optional[int] = model.apply(
{'''params''': params} , lowerCAmelCase_ , jnp.array(lowerCAmelCase_ , dtype=jnp.intaa ) , encoder_hidden_states=lowerCAmelCase_ , ).sample
assert sample.shape == latents.shape
_A: List[str] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_A: List[str] = jnp.array(lowerCAmelCase_ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-2 )
| 121 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(
    hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True
) -> None:
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4],
            key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4],
            key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4],
            key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4],
            key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4],
            key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="levit-dump-folder/",
        type=Path,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 364 |
'''simple docstring'''
from math import sqrt
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10_000) -> int:
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
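    # Worked example: 220 and 284 form the smallest amicable pair, since
    # sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220, so both are
    # counted by solution(). Perfect numbers (where sum_of_divisors(n) == n)
    # are excluded by the != check above.
    # assert sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220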
| 199 | 0 |
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
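    # Example invocation of this conversion script (the script name and paths
    # are hypothetical -- point the first argument at a real fairseq XGLM
    # checkpoint on your machine):
    #   python convert_xglm_checkpoint.py /path/to/xglm/model.pt ./xglm-hf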
| 106 |
"""simple docstring"""
# Sampling schedules for the DeepFloyd IF pipelines. The original dump
# assigned every list to the same placeholder name, so each assignment
# overwrote the previous one; distinct names are restored here following the
# fast/smart/super naming used in diffusers' deepfloyd_if timesteps module
# (assumed from the list lengths: 27/27/50/100/185 and 27/40/100 entries).
fast27_timesteps = [
    999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266,
    244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0,
]

smart27_timesteps = [
    999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429,
    428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0,
]

smart50_timesteps = [
    999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840,
    820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399,
    350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100, 99, 88,
    77, 66, 55, 44, 33, 22, 11, 0,
]

smart100_timesteps = [
    999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956,
    951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903, 897, 892,
    887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828, 827, 820,
    813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733,
    724, 716, 707, 699, 698, 688, 677, 666, 656, 655, 645, 634, 623, 613,
    612, 598, 584, 570, 569, 555, 541, 527, 526, 505, 484, 483, 462, 440,
    439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88,
    44, 0,
]

smart185_timesteps = [
    999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970,
    968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944, 941, 939,
    936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910, 907, 905,
    902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867,
    864, 861, 858, 855, 852, 849, 846, 843, 840, 837, 834, 831, 828, 827,
    824, 821, 817, 814, 811, 808, 804, 801, 798, 795, 791, 788, 785, 784,
    780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737,
    733, 730, 726, 722, 718, 714, 710, 707, 703, 699, 698, 694, 690, 685,
    681, 677, 673, 669, 664, 660, 656, 655, 650, 646, 641, 636, 632, 627,
    622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563,
    557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483,
    474, 466, 457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352,
    351, 330, 308, 307, 286, 264, 263, 242, 220, 219, 176, 175, 132, 131,
    88, 44, 0,
]

super27_timesteps = [
    999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899,
    874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0,
]

super40_timesteps = [
    999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907,
    900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600,
    599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0,
]

super100_timesteps = [
    999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955,
    951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914, 910, 907,
    903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830, 823, 815,
    808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688,
    677, 666, 655, 644, 633, 622, 611, 600, 599, 585, 571, 557, 542, 528,
    514, 500, 499, 485, 471, 457, 442, 428, 414, 400, 399, 379, 359, 340,
    320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66,
    33, 0,
]
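# A typical consumer hands one of these lists to a pipeline call as an
# explicit sampling schedule; a sketch, assuming `if_pipe` was created with
# IFPipeline.from_pretrained and that its __call__ accepts a `timesteps`
# keyword (as the DeepFloyd IF pipelines do):
# image = if_pipe(prompt_embeds=embeds, timesteps=fast27_timesteps).images[0]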
| 106 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}


class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
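# Minimal round-trip sketch with the published checkpoint referenced above
# (downloads the sentencepiece model on first use):
# tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
# ids = tok("Crime and Punishment").input_ids
# print(tok.decode(ids))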
| 363 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
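# What the lazy module buys you (a sketch): importing the package stays cheap,
# and the heavy torch-backed submodule is only executed when one of its
# attributes is first accessed, e.g. (exact import path depends on your
# transformers version):
# from transformers import MCTCTConfig  # resolves through the lazy module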
| 88 | 0 |
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
_lowerCamelCase : Dict = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class lowercase ( unittest.TestCase):
def a_ ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : bool , _lowerCamelCase : str = None , _lowerCamelCase : list = None ):
"""simple docstring"""
A_ : List[Any] = None
A_ : Optional[int] = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
A_ : Optional[Any] = os.path.abspath('''examples''' )
for item in os.listdir(_lowerCamelCase ):
if item not in EXCLUDE_EXAMPLES:
A_ : Any = os.path.join(_lowerCamelCase , _lowerCamelCase )
if os.path.isfile(_lowerCamelCase ) and ".py" in item_path:
with self.subTest(
tested_script=_lowerCamelCase , feature_script=_lowerCamelCase , tested_section='''main()''' if parser_only else '''training_function()''' , ):
A_ : Optional[int] = compare_against_test(
os.path.join(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A_ : str = '''\n'''.join(_lowerCamelCase )
if special_strings is not None:
for string in special_strings:
A_ : Optional[Any] = diff.replace(_lowerCamelCase , '''''' )
self.assertEqual(_lowerCamelCase , '''''' )
def a_ ( self : Dict ):
"""simple docstring"""
self.one_complete_example('''complete_nlp_example.py''' , _lowerCamelCase )
self.one_complete_example('''complete_nlp_example.py''' , _lowerCamelCase )
def a_ ( self : int ):
"""simple docstring"""
A_ : List[str] = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
A_ : List[Any] = [
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
self.one_complete_example('''complete_cv_example.py''' , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""})
class lowercase ( __UpperCAmelCase):
__lowerCAmelCase : Any = False
@classmethod
def a_ ( cls : Union[str, Any] ):
"""simple docstring"""
super().setUpClass()
A_ : str = tempfile.mkdtemp()
A_ : Union[str, Any] = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
A_ : List[str] = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def a_ ( cls : str ):
"""simple docstring"""
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def a_ ( self : Optional[int] ):
"""simple docstring"""
A_ : Any = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def a_ ( self : Any ):
"""simple docstring"""
A_ : Any = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
A_ : int = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def a_ ( self : List[Any] ):
"""simple docstring"""
A_ : Union[str, Any] = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}
""".split()
A_ : Dict = run_command(self._launch_args + testargs , return_stdout=_lowerCamelCase )
self.assertNotIn('''epoch 0:''' , _lowerCamelCase )
self.assertIn('''epoch 1:''' , _lowerCamelCase )
def a_ ( self : List[Any] ):
"""simple docstring"""
A_ : Dict = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}
""".split()
A_ : List[str] = run_command(self._launch_args + testargs , return_stdout=_lowerCamelCase )
if torch.cuda.is_available():
A_ : Optional[int] = torch.cuda.device_count()
else:
A_ : List[str] = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , _lowerCamelCase )
self.assertIn('''epoch 1:''' , _lowerCamelCase )
else:
self.assertIn('''epoch 0:''' , _lowerCamelCase )
self.assertIn('''epoch 1:''' , _lowerCamelCase )
@slow
def a_ ( self : str ):
"""simple docstring"""
A_ : Union[str, Any] = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
A_ : Union[str, Any] = run_command(self._launch_args + testargs , return_stdout=_lowerCamelCase )
A_ : Dict = re.findall('''({.+})''' , _lowerCamelCase )
A_ : Tuple = [r for r in results if '''accuracy''' in r][-1]
A_ : List[Any] = ast.literal_eval(_lowerCamelCase )
self.assertGreaterEqual(results['''accuracy'''] , 0.75 )
def a_ ( self : str ):
"""simple docstring"""
A_ : List[str] = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def a_ ( self : str ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
A_ : List[Any] = F"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(_lowerCamelCase , '''tracking''' ) ) )
def a_ ( self : List[str] ):
"""simple docstring"""
A_ : Optional[Any] = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def a_ ( self : List[Any] ):
"""simple docstring"""
A_ : Dict = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 167 |
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])
if __name__ == "__main__":
    write_movies()
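    # Caveat: the scraper assumes IMDb's Top-250 chart still renders
    # server-side with `titleColumn` / `ratingColumn` table cells; IMDb has
    # reworked that page before, so the selectors may need updating if the
    # markup changes.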
| 167 | 1 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE__ :
@staticmethod
def UpperCAmelCase_ ( *A_ , **A_ )-> str:
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase):
lowerCAmelCase_ = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def UpperCAmelCase_ ( self , A_ , A_ , A_ )-> Any:
'''simple docstring'''
UpperCamelCase = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
UpperCamelCase = [
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
]
return object_detector, examples
def UpperCAmelCase_ ( self , A_ , A_ )-> List[Any]:
'''simple docstring'''
UpperCamelCase = object_detector(examples[0] , threshold=0.0 )
UpperCamelCase = len(A_ )
self.assertGreater(A_ , 0 )
self.assertEqual(
A_ , [
{
'score': ANY(A_ ),
'label': ANY(A_ ),
'box': {'xmin': ANY(A_ ), 'ymin': ANY(A_ ), 'xmax': ANY(A_ ), 'ymax': ANY(A_ )},
}
for i in range(A_ )
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
pass
@require_torch
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
UpperCamelCase = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
UpperCamelCase = object_detector(
'./tests/fixtures/tests_samples/COCO/000000039769.png' , candidate_labels=['cat', 'remote', 'couch'] , threshold=0.64 , )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{'score': 0.7_235, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7_218, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7_184, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.6_748, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_656, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_614, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_456, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
{'score': 0.6_419, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
] , )
UpperCamelCase = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
[
{'score': 0.7_235, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7_218, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7_184, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.6_748, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_656, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_614, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_456, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
{'score': 0.6_419, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
]
] , )
@require_torch
@slow
def UpperCAmelCase_ ( self )-> str:
'''simple docstring'''
UpperCamelCase = pipeline('zero-shot-object-detection' )
UpperCamelCase = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2_537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.1_474, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.1_208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
] , )
UpperCamelCase = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] , )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
[
{'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2_537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.1_474, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.1_208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
],
[
{'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2_537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.1_474, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.1_208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
],
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
pass
@require_torch
@slow
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
UpperCamelCase = 0.2
UpperCamelCase = pipeline('zero-shot-object-detection' )
UpperCamelCase = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , threshold=A_ , )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2_537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
] , )
@require_torch
@slow
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
UpperCamelCase = 2
UpperCamelCase = pipeline('zero-shot-object-detection' )
UpperCamelCase = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , top_k=A_ , )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
] , )
| 352 |
'''simple docstring'''
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total_value = max_face_number * dice_number
    totals_frequencies = [0] * (max_total_value + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total_value = sum(dice_numbers)
        totals_frequencies[total_value] += 1

    return totals_frequencies


def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total_value = 9
    max_peter_total_value = 4 * 9
    min_colin_total_value = 6
    for peter_total_value in range(min_peter_total_value + 1, max_peter_total_value + 1):
        peter_wins_count += peter_totals_frequencies[peter_total_value] * sum(
            colin_totals_frequencies[min_colin_total_value:peter_total_value]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
| 251 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
__a = TypeVar("T")
class lowerCamelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self: Optional[int] , snake_case: list[T] , snake_case: Callable[[T, T], T] ) -> None:
snake_case_ :Any | T = None
snake_case_ :int = len(snake_case )
snake_case_ :list[T] = [any_type for _ in range(self.N )] + arr
snake_case_ :Union[str, Any] = fnc
self.build()
def lowerCAmelCase_ ( self: Tuple ) -> None:
for p in range(self.N - 1 , 0 , -1 ):
snake_case_ :str = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def lowerCAmelCase_ ( self: Optional[int] , snake_case: int , snake_case: T ) -> None:
p += self.N
snake_case_ :Tuple = v
while p > 1:
snake_case_ :Any = p // 2
snake_case_ :str = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def lowerCAmelCase_ ( self: str , snake_case: int , snake_case: int ) -> T | None: # noqa: E741
snake_case_, snake_case_ :Tuple = l + self.N, r + self.N
snake_case_ :T | None = None
while l <= r:
if l % 2 == 1:
snake_case_ :Tuple = self.st[l] if res is None else self.fn(snake_case , self.st[l] )
if r % 2 == 0:
snake_case_ :Optional[Any] = self.st[r] if res is None else self.fn(snake_case , self.st[r] )
snake_case_, snake_case_ :Dict = (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        '''simple docstring'''
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)
    test_all_segments()
    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
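    # A small usage sketch for the SegmentTree above: inclusive 0-based [l, r]
    # range queries and point updates, each in O(log n).
    demo_tree = SegmentTree([5, 1, 4, 2], min)
    assert demo_tree.query(0, 3) == 1  # minimum over the whole array
    assert demo_tree.query(2, 3) == 2  # minimum over the last two elements
    demo_tree.update(1, 9)
    assert demo_tree.query(0, 3) == 2  # the old minimum was overwritten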
| 66 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A_ : Tuple = logging.get_logger(__name__)
def squared_euclidean_distance(a, b) -> np.ndarray:
    '''simple docstring'''
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d
def color_quantize(x, clusters) -> np.ndarray:
    '''simple docstring'''
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class A_ ( _a ):
'''simple docstring'''
a__ = ["pixel_values"]
def __init__(self , lowercase__ = None , lowercase__ = True , lowercase__ = None , lowercase__ = PILImageResampling.BILINEAR , lowercase__ = True , lowercase__ = True , **lowercase__ , ) -> None:
super().__init__(**lowercase__ )
__UpperCAmelCase = size if size is not None else {'''height''': 256, '''width''': 256}
__UpperCAmelCase = get_size_dict(lowercase__ )
__UpperCAmelCase = np.array(lowercase__ ) if clusters is not None else None
__UpperCAmelCase = do_resize
__UpperCAmelCase = size
__UpperCAmelCase = resample
__UpperCAmelCase = do_normalize
__UpperCAmelCase = do_color_quantize
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ = PILImageResampling.BILINEAR , lowercase__ = None , **lowercase__ , ) -> np.ndarray:
__UpperCAmelCase = get_size_dict(lowercase__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
return resize(
lowercase__ , size=(size['''height'''], size['''width''']) , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ = None , ) -> np.ndarray:
__UpperCAmelCase = rescale(image=lowercase__ , scale=1 / 127.5 , data_format=lowercase__ )
__UpperCAmelCase = image - 1
return image
def lowerCAmelCase_ (self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ) -> PIL.Image.Image:
__UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
__UpperCAmelCase = size if size is not None else self.size
__UpperCAmelCase = get_size_dict(lowercase__ )
__UpperCAmelCase = resample if resample is not None else self.resample
__UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
__UpperCAmelCase = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
__UpperCAmelCase = clusters if clusters is not None else self.clusters
__UpperCAmelCase = np.array(lowercase__ )
__UpperCAmelCase = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_color_quantize and clusters is None:
raise ValueError('''Clusters must be specified if do_color_quantize is True.''' )
# All transformations expect numpy arrays.
__UpperCAmelCase = [to_numpy_array(lowercase__ ) for image in images]
if do_resize:
__UpperCAmelCase = [self.resize(image=lowercase__ , size=lowercase__ , resample=lowercase__ ) for image in images]
if do_normalize:
__UpperCAmelCase = [self.normalize(image=lowercase__ ) for image in images]
if do_color_quantize:
__UpperCAmelCase = [to_channel_dimension_format(lowercase__ , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
__UpperCAmelCase = np.array(lowercase__ )
__UpperCAmelCase = color_quantize(lowercase__ , lowercase__ ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
__UpperCAmelCase = images.shape[0]
__UpperCAmelCase = images.reshape(lowercase__ , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
__UpperCAmelCase = list(lowercase__ )
else:
__UpperCAmelCase = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
__UpperCAmelCase = {'''input_ids''': images}
return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
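# A minimal sketch exercising the module-level helpers above: map random RGB
# pixels onto nearest-cluster ids. The 4-colour palette is invented purely for
# illustration.
#   palette = np.array([[0, 0, 0], [255, 0, 0], [0, 255, 0], [0, 0, 255]], dtype=np.float64)
#   pixels = np.random.rand(8, 8, 3) * 255  # fake 8x8 RGB image
#   ids = color_quantize(pixels, palette)   # shape (64,), values in [0, 4)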
| 333 | 0 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any , _lowerCamelCase : int) -> Any:
'''simple docstring'''
__UpperCamelCase : Tuple = XCLIPTextConfig()
# derive patch size from model name
__UpperCamelCase : Any = model_name.find("patch")
__UpperCamelCase : Dict = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
__UpperCamelCase : List[str] = XCLIPVisionConfig(patch_size=_lowerCamelCase , num_frames=_lowerCamelCase)
if "large" in model_name:
__UpperCamelCase : Any = 768
__UpperCamelCase : List[str] = 3_072
__UpperCamelCase : Dict = 12
__UpperCamelCase : List[Any] = 1_024
__UpperCamelCase : int = 4_096
__UpperCamelCase : List[Any] = 16
__UpperCamelCase : List[Any] = 24
__UpperCamelCase : Tuple = 768
__UpperCamelCase : Union[str, Any] = 3_072
if model_name == "xclip-large-patch14-16-frames":
__UpperCamelCase : List[Any] = 336
__UpperCamelCase : List[Any] = XCLIPConfig.from_text_vision_configs(_lowerCamelCase , _lowerCamelCase)
if "large" in model_name:
__UpperCamelCase : str = 768
return config
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict) -> Tuple:
'''simple docstring'''
if name == "token_embedding.weight":
__UpperCamelCase : Tuple = name.replace("token_embedding.weight" , "text_model.embeddings.token_embedding.weight")
if name == "positional_embedding":
__UpperCamelCase : Dict = name.replace("positional_embedding" , "text_model.embeddings.position_embedding.weight")
if "ln_1" in name:
__UpperCamelCase : Union[str, Any] = name.replace("ln_1" , "layer_norm1")
if "ln_2" in name:
__UpperCamelCase : Tuple = name.replace("ln_2" , "layer_norm2")
if "c_fc" in name:
__UpperCamelCase : Dict = name.replace("c_fc" , "fc1")
if "c_proj" in name:
__UpperCamelCase : str = name.replace("c_proj" , "fc2")
if name.startswith("transformer.resblocks"):
__UpperCamelCase : str = name.replace("transformer.resblocks" , "text_model.encoder.layers")
if "attn.out_proj" in name and "message" not in name:
__UpperCamelCase : int = name.replace("attn.out_proj" , "self_attn.out_proj")
if "ln_final" in name:
__UpperCamelCase : Union[str, Any] = name.replace("ln_final" , "text_model.final_layer_norm")
# visual encoder
if name == "visual.class_embedding":
__UpperCamelCase : List[Any] = name.replace("visual.class_embedding" , "vision_model.embeddings.class_embedding")
if name == "visual.positional_embedding":
__UpperCamelCase : List[str] = name.replace("visual.positional_embedding" , "vision_model.embeddings.position_embedding.weight")
if name.startswith("visual.transformer.resblocks"):
__UpperCamelCase : int = name.replace("visual.transformer.resblocks" , "vision_model.encoder.layers")
if "visual.conv1" in name:
__UpperCamelCase : List[Any] = name.replace("visual.conv1" , "vision_model.embeddings.patch_embedding")
if "visual.ln_pre" in name:
__UpperCamelCase : Union[str, Any] = name.replace("visual.ln_pre" , "vision_model.pre_layernorm")
if "visual.ln_post" in name:
__UpperCamelCase : Tuple = name.replace("visual.ln_post" , "vision_model.post_layernorm")
if "visual.proj" in name:
__UpperCamelCase : Optional[Any] = name.replace("visual.proj" , "visual_projection.weight")
if "text_projection" in name:
__UpperCamelCase : List[Any] = name.replace("text_projection" , "text_projection.weight")
# things on top
if "prompts_visual_proj" in name:
__UpperCamelCase : int = name.replace("prompts_visual_proj" , "prompts_visual_projection")
if "prompts_visual_ln" in name:
__UpperCamelCase : str = name.replace("prompts_visual_ln" , "prompts_visual_layernorm")
# mit
if name == "mit.positional_embedding":
__UpperCamelCase : Dict = name.replace("positional" , "position")
if name.startswith("mit.resblocks"):
__UpperCamelCase : str = name.replace("mit.resblocks" , "mit.encoder.layers")
# prompts generator
if name.startswith("prompts_generator.norm"):
__UpperCamelCase : Any = name.replace("prompts_generator.norm" , "prompts_generator.layernorm")
return name
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int , _lowerCamelCase : Union[str, Any]) -> str:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
__UpperCamelCase : Union[str, Any] = orig_state_dict.pop(_lowerCamelCase)
if "attn.in_proj" in key:
__UpperCamelCase : Optional[int] = key.split(".")
if key.startswith("visual"):
__UpperCamelCase : Optional[Any] = key_split[3]
__UpperCamelCase : List[str] = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__UpperCamelCase : str = val[
:dim, :
]
__UpperCamelCase : Tuple = val[
dim : dim * 2, :
]
__UpperCamelCase : str = val[
-dim:, :
]
else:
__UpperCamelCase : str = val[
:dim
]
__UpperCamelCase : Optional[Any] = val[
dim : dim * 2
]
__UpperCamelCase : List[str] = val[
-dim:
]
else:
if "weight" in key:
__UpperCamelCase : Optional[Any] = val[
:dim, :
]
__UpperCamelCase : Optional[int] = val[
dim : dim * 2, :
]
__UpperCamelCase : Union[str, Any] = val[
-dim:, :
]
else:
__UpperCamelCase : Optional[Any] = val[:dim]
__UpperCamelCase : Optional[int] = val[
dim : dim * 2
]
__UpperCamelCase : List[str] = val[-dim:]
elif key.startswith("mit"):
__UpperCamelCase : List[Any] = key_split[2]
__UpperCamelCase : Optional[int] = config.vision_config.mit_hidden_size
if "weight" in key:
__UpperCamelCase : Any = val[:dim, :]
__UpperCamelCase : Optional[Any] = val[dim : dim * 2, :]
__UpperCamelCase : Dict = val[-dim:, :]
else:
__UpperCamelCase : Optional[int] = val[:dim]
__UpperCamelCase : Optional[int] = val[dim : dim * 2]
__UpperCamelCase : Dict = val[-dim:]
else:
__UpperCamelCase : List[str] = key_split[2]
__UpperCamelCase : Tuple = config.text_config.hidden_size
if "weight" in key:
__UpperCamelCase : Optional[Any] = val[:dim, :]
__UpperCamelCase : Union[str, Any] = val[
dim : dim * 2, :
]
__UpperCamelCase : Optional[Any] = val[-dim:, :]
else:
__UpperCamelCase : Any = val[:dim]
__UpperCamelCase : Union[str, Any] = val[
dim : dim * 2
]
__UpperCamelCase : Optional[int] = val[-dim:]
else:
__UpperCamelCase : Dict = rename_key(_lowerCamelCase)
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__UpperCamelCase : List[Any] = val.T
__UpperCamelCase : Any = val
return orig_state_dict
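# The in_proj handling above splits one stacked attention projection into Q, K
# and V along dim 0. A standalone sketch of that slicing with toy shapes
# (hypothetical weights, not actual X-CLIP tensors):
#   dim = 4
#   in_proj_weight = torch.randn(3 * dim, dim)  # rows stacked as [q; k; v]
#   q_w = in_proj_weight[:dim, :]
#   k_w = in_proj_weight[dim : dim * 2, :]
#   v_w = in_proj_weight[-dim:, :]
#   assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), in_proj_weight)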
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any]) -> Tuple:
'''simple docstring'''
if num_frames == 8:
__UpperCamelCase : Any = "eating_spaghetti_8_frames.npy"
elif num_frames == 16:
__UpperCamelCase : Any = "eating_spaghetti.npy"
elif num_frames == 32:
__UpperCamelCase : Tuple = "eating_spaghetti_32_frames.npy"
__UpperCamelCase : Optional[int] = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename=_lowerCamelCase , repo_type="dataset" , )
__UpperCamelCase : Optional[Any] = np.load(_lowerCamelCase)
return list(_lowerCamelCase)
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : List[Any]=False) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase : Any = {
# fully supervised kinetics-400 checkpoints
"xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
"xclip-base-patch32-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
),
"xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
"xclip-base-patch16-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
),
"xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
"xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
# fully supervised kinetics-600 checkpoints
"xclip-base-patch16-kinetics-600": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
),
"xclip-base-patch16-kinetics-600-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
),
"xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
# few shot
"xclip-base-patch16-hmdb-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
),
"xclip-base-patch16-hmdb-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
),
"xclip-base-patch16-hmdb-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
),
"xclip-base-patch16-hmdb-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
),
"xclip-base-patch16-ucf-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
),
"xclip-base-patch16-ucf-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
),
"xclip-base-patch16-ucf-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
),
"xclip-base-patch16-ucf-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
),
# zero shot
"xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
}
__UpperCamelCase : Union[str, Any] = model_to_url[model_name]
__UpperCamelCase : Tuple = 8
if "16-frames" in model_name:
__UpperCamelCase : Optional[int] = 16
elif "shot" in model_name:
__UpperCamelCase : List[Any] = 32
__UpperCamelCase : Optional[int] = get_xclip_config(_lowerCamelCase , _lowerCamelCase)
__UpperCamelCase : Optional[Any] = XCLIPModel(_lowerCamelCase)
model.eval()
if "drive" in checkpoint_url:
__UpperCamelCase : str = "pytorch_model.bin"
gdown.cached_download(_lowerCamelCase , _lowerCamelCase , quiet=_lowerCamelCase)
__UpperCamelCase : Optional[Any] = torch.load(_lowerCamelCase , map_location="cpu")["model"]
else:
__UpperCamelCase : Optional[Any] = torch.hub.load_state_dict_from_url(_lowerCamelCase)["model"]
__UpperCamelCase : Dict = convert_state_dict(_lowerCamelCase , _lowerCamelCase)
__UpperCamelCase : Dict = XCLIPModel(_lowerCamelCase)
__UpperCamelCase : Dict = model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase)
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
__UpperCamelCase : str = 336 if model_name == "xclip-large-patch14-16-frames" else 224
__UpperCamelCase : str = VideoMAEImageProcessor(size=_lowerCamelCase)
__UpperCamelCase : Any = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
__UpperCamelCase : Optional[int] = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
__UpperCamelCase : Optional[Any] = XCLIPProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase)
__UpperCamelCase : str = prepare_video(_lowerCamelCase)
__UpperCamelCase : Tuple = processor(
text=["playing sports", "eating spaghetti", "go shopping"] , videos=_lowerCamelCase , return_tensors="pt" , padding=_lowerCamelCase)
print("Shape of pixel values:" , inputs.pixel_values.shape)
with torch.no_grad():
__UpperCamelCase : Optional[int] = model(**_lowerCamelCase)
# Verify outputs
__UpperCamelCase : Union[str, Any] = outputs.logits_per_video
__UpperCamelCase : List[str] = logits_per_video.softmax(dim=1)
print("Probs:" , _lowerCamelCase)
# kinetics-400
if model_name == "xclip-base-patch32":
__UpperCamelCase : int = torch.tensor([[0.0_0_1_9, 0.9_9_5_1, 0.0_0_3_0]])
elif model_name == "xclip-base-patch32-16-frames":
__UpperCamelCase : List[str] = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
elif model_name == "xclip-base-patch16":
__UpperCamelCase : Any = torch.tensor([[0.0_0_8_3, 0.9_6_8_1, 0.0_2_3_6]])
elif model_name == "xclip-base-patch16-16-frames":
__UpperCamelCase : int = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
elif model_name == "xclip-large-patch14":
__UpperCamelCase : Optional[Any] = torch.tensor([[0.0_0_6_2, 0.9_8_6_4, 0.0_0_7_5]])
elif model_name == "xclip-large-patch14-16-frames":
__UpperCamelCase : List[str] = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__UpperCamelCase : Dict = torch.tensor([[0.0_5_5_5, 0.8_9_1_4, 0.0_5_3_1]])
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__UpperCamelCase : int = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
elif model_name == "xclip-large-patch14-kinetics-600":
__UpperCamelCase : List[Any] = torch.tensor([[0.0_0_3_6, 0.9_9_2_0, 0.0_0_4_5]])
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__UpperCamelCase : Union[str, Any] = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__UpperCamelCase : Dict = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__UpperCamelCase : Any = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__UpperCamelCase : Optional[int] = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
elif model_name == "xclip-base-patch16-ucf-2-shot":
__UpperCamelCase : Tuple = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
elif model_name == "xclip-base-patch16-ucf-4-shot":
__UpperCamelCase : Union[str, Any] = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
elif model_name == "xclip-base-patch16-ucf-8-shot":
__UpperCamelCase : List[str] = torch.tensor([[0.0_0_2_7, 0.9_9_0_4, 0.0_0_7_0]])
elif model_name == "xclip-base-patch16-ucf-16-shot":
__UpperCamelCase : Any = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__UpperCamelCase : int = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
else:
raise ValueError(F'Model name {model_name} not supported')
assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3)
print("Looks ok!")
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}')
model.save_pretrained(_lowerCamelCase)
if push_to_hub:
print("Pushing model, processor and slow tokenizer files to the hub...")
model.push_to_hub(_lowerCamelCase , organization="nielsr")
processor.push_to_hub(_lowerCamelCase , organization="nielsr")
slow_tokenizer.push_to_hub(_lowerCamelCase , organization="nielsr")
if __name__ == "__main__":
lowercase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='xclip-base-patch32',
type=str,
help='Name of the model.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
lowercase : Any = parser.parse_args()
    convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 351 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    '''simple docstring'''
    selected_warnings = set()
    buffer = []
    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f': {x}: ' in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)
    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.')
    return selected_warnings
def extract_warnings(artifact_dir, targets):
    '''simple docstring'''
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
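# A standalone sketch of the buffering idea in parse_line above: a pytest
# warning is one unindented header line plus indented detail lines, so the
# buffer is flushed whenever indentation resets (sample log lines invented):
#   buffer, grouped = [], []
#   for line in log_lines + [""]:  # trailing sentinel flushes the last group
#       if line.startswith(" ") and buffer:
#           buffer.append(line.strip())
#       else:
#           if buffer:
#               grouped.append("\n".join(buffer))
#               buffer = []
#           if line:
#               buffer.append(line)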
if __name__ == "__main__":
    def list_str(values):
        '''simple docstring'''
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 151 | 0 |
"""simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
| 109 |
'''simple docstring'''
from __future__ import annotations
import math
__version__ = '2020.9.26'
__author__ = 'xcodz-dot, cclaus, dhruvmanila'
def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float):
    '''simple docstring'''
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def rotate(x: float, y: float, z: float, axis: str, angle: float):
    '''simple docstring'''
    if not isinstance(axis, str):
        raise TypeError('''Axis must be a str''')
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            '''Input values except axis must either be float or int: '''
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 3_6_0) / 4_5_0 * 1_8_0 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''')
    return new_x, new_y, new_z
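# A quick invariant check for rotate() above (a sketch): a rotation about the
# z-axis must leave z untouched and preserve the length of the (x, y) vector.
assert rotate(1.0, 2.0, 3.0, "z", 45.0)[2] == 3.0
assert math.isclose(
    math.hypot(*rotate(1.0, 2.0, 3.0, "z", 45.0)[:2]), math.hypot(1.0, 2.0)
)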
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(F'''{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }''')
    print(F'''{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }''')
| 152 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """simple docstring"""
    if not postfix_notation:
        return 0
    operations = {'+', '-', '*', '/'}
    stack: list[Any] = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()
if __name__ == "__main__":
    import doctest
    doctest.testmod()
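    # Worked examples for evaluate_postfix above (a sketch): "2 3 + 4 *" is
    # (2 + 3) * 4, and division truncates toward zero for mixed signs.
    assert evaluate_postfix(["2", "3", "+", "4", "*"]) == 20
    assert evaluate_postfix(["-7", "2", "/"]) == -3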
| 254 |
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    """simple docstring"""
    if version.parse(hfh.__version__).release < version.parse('0.11.0').release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type='dataset', revision=revision)
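# A usage sketch for the wrapper above (the repo id and filename are invented
# for illustration); with a recent huggingface_hub this resolves to the
# dataset "resolve" URL, roughly:
#   hf_hub_url("user/demo", "data/train.csv", revision="main")
#   -> "https://huggingface.co/datasets/user/demo/resolve/main/data/train.csv"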
| 254 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 247 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = '''openai/whisper-base'''
    description = (
        '''This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '''
        '''transcribed text.'''
    )
    name = '''transcriber'''
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ['''audio''']
    outputs = ['''text''']
    def encode(self, audio):
        '''simple docstring'''
        return self.pre_processor(audio, return_tensors="pt").input_features
    def forward(self, inputs):
        '''simple docstring'''
        return self.model.generate(inputs=inputs)
    def decode(self, outputs):
        '''simple docstring'''
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
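# Usage sketch (hedged; the exact agent plumbing varies across transformers
# versions): the tool maps raw audio to text in three steps, roughly:
#   features = tool.encode(audio)    # WhisperProcessor -> input_features
#   tokens = tool.forward(features)  # WhisperForConditionalGeneration.generate
#   text = tool.decode(tokens)       # batch_decode, special tokens stripped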
| 247 | 1 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 184 |
'''simple docstring'''
def bead_sort(sequence: list) -> list:
    """simple docstring"""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("""Sequence must be list of non-negative integers""")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
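    # Complexity note as a sketch: each of the n passes does O(n) adjacent
    # comparisons, so bead sort is O(n^2) and only handles non-negative ints.
    assert bead_sort([0, 3, 0, 2]) == [0, 0, 2, 3]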
| 184 | 1 |
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    """simple docstring"""
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("""0.17.0"""):
        return method
    def wrapper(self, *args, **kwargs):
        if hasattr(self, """_hf_hook""") and hasattr(self._hf_hook, """pre_forward"""):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)
    return wrapper
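# Usage sketch (the class and method below are illustrative only): decorating
# forward ensures any accelerate offloading hook moves weights onto the right
# device before the call runs.
#   class MyModel(torch.nn.Module):
#       @apply_forward_hook
#       def forward(self, x):
#           return x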
| 200 |
'''simple docstring'''
class RadixNode:
    '''simple docstring'''
    def __init__(self, prefix: str = "", is_leaf: bool = False):
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix
    def match(self, word: str):
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)
    def insert(self, word: str) -> None:
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)
    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("""-""" * height, self.prefix, """ (leaf)""" if self.is_leaf else """""")
        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    """simple docstring"""
    words = """banana bananas bandana band apple all beast""".split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
    assert not root.find("""bandanas""")
    assert not root.find("""apps""")
    root.delete("""all""")
    assert not root.find("""all""")
    root.delete("""banana""")
    assert not root.find("""banana""")
    assert root.find("""bananas""")
    return True
def pytests() -> None:
    """simple docstring"""
    assert test_trie()
def main() -> None:
    """simple docstring"""
    root = RadixNode()
    words = """banana bananas bandanas bandana band apple all beast""".split()
    root.insert_many(words)
    print("""Words:""", words)
    print("""Tree:""")
    root.print_tree()
if __name__ == "__main__":
    main()
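    # A small extra sketch of prefix compression: "band" and "bandana" share a
    # single "band" node, so a lookup of the bare prefix "ban" misses.
    demo = RadixNode()
    demo.insert_many(["band", "bandana"])
    assert demo.find("band") and demo.find("bandana") and not demo.find("ban")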
| 200 | 1 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _a ( __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = """"""
SCREAMING_SNAKE_CASE_ : str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
SCREAMING_SNAKE_CASE_ : str = None # compression type in fsspec. ex: "gzip"
SCREAMING_SNAKE_CASE_ : str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self ,_SCREAMING_SNAKE_CASE = "" ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ) -> int:
super().__init__(self ,**_SCREAMING_SNAKE_CASE )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
_snake_case = fsspec.open(
_SCREAMING_SNAKE_CASE ,mode="rb" ,protocol=_SCREAMING_SNAKE_CASE ,compression=self.compression ,client_kwargs={
"requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
"trust_env": True, # Enable reading proxy env variables.
**(target_options or {}).pop("client_kwargs" ,{} ), # To avoid issues if it was already passed.
} ,**(target_options or {}) ,)
_snake_case = os.path.basename(self.file.path.split("::" )[0] )
_snake_case = (
self.compressed_name[: self.compressed_name.rindex("." )]
if "." in self.compressed_name
else self.compressed_name
)
_snake_case = None
@classmethod
def _lowercase ( cls ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
# compressed file paths are always relative to the archive root
return super()._strip_protocol(_SCREAMING_SNAKE_CASE ).lstrip("/" )
def _lowercase ( self ) -> Optional[int]:
if self.dir_cache is None:
_snake_case = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name}
_snake_case = {f["name"]: f}
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ) -> List[Any]:
return self.file.open().read()
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = "rb" ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=None ,**_SCREAMING_SNAKE_CASE ,) -> Union[str, Any]:
_snake_case = self._strip_protocol(_SCREAMING_SNAKE_CASE )
if mode != "rb":
raise ValueError(f"""Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'""" )
return self.file.open()
class _a ( __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Any = """bz2"""
SCREAMING_SNAKE_CASE_ : Optional[int] = """bz2"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = """.bz2"""
class _a ( __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = """gzip"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = """gzip"""
SCREAMING_SNAKE_CASE_ : Any = """.gz"""
class _a ( __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Dict = """lz4"""
SCREAMING_SNAKE_CASE_ : str = """lz4"""
SCREAMING_SNAKE_CASE_ : int = """.lz4"""
class _a ( __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : List[str] = """xz"""
SCREAMING_SNAKE_CASE_ : Optional[int] = """xz"""
SCREAMING_SNAKE_CASE_ : List[str] = """.xz"""
class _a ( __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : str = """zstd"""
SCREAMING_SNAKE_CASE_ : str = """zstd"""
SCREAMING_SNAKE_CASE_ : Any = """.zst"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = "rb" ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = DEFAULT_BLOCK_SIZE ,**_SCREAMING_SNAKE_CASE ,) -> str:
super().__init__(
fo=_SCREAMING_SNAKE_CASE ,mode=_SCREAMING_SNAKE_CASE ,target_protocol=_SCREAMING_SNAKE_CASE ,target_options=_SCREAMING_SNAKE_CASE ,block_size=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
_snake_case = self.file.__enter__
class _a :
def __init__( self ,_SCREAMING_SNAKE_CASE ) -> str:
_snake_case = file_
def __enter__( self ) -> Optional[Any]:
self._file.__enter__()
return self
def __exit__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Tuple:
self._file.__exit__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def __iter__( self ) -> List[str]:
return iter(self._file )
def _lowercase ( self ) -> str:
return next(self._file )
def __getattr__( self ,_SCREAMING_SNAKE_CASE ) -> Dict:
return getattr(self._file ,_SCREAMING_SNAKE_CASE )
def fixed_enter(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ):
return WrappedFile(_enter(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) )
_snake_case = fixed_enter
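# A usage sketch of protocol chaining with the compression filesystems above
# (the archive path is invented): fsspec can read a member straight out of a
# gzip file via chained URLs, e.g.
#   import fsspec
#   with fsspec.open("gzip://file.txt::/tmp/file.txt.gz", "rb") as f:
#       data = f.read()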
| 142 |
'''simple docstring'''
def twos_complement(number: int) -> str:
    """simple docstring"""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
    import doctest
    doctest.testmod()
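    # A worked trace of twos_complement above: for -5, bin(-5)[3:] == "101"
    # (3 magnitude bits), abs(-5) - (1 << 3) == -3 whose bits are "11", and
    # padding plus the leading 1 yields the 4-bit two's complement.
    assert twos_complement(-5) == "0b1011"
    assert twos_complement(0) == "0b0"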
| 142 | 1 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
UpperCAmelCase = """\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"""
UpperCAmelCase = """\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"""
UpperCAmelCase = """\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
    def _compute(self, predictions, references, normalized: bool = False, ignore_punct: bool = False, support_zh_ja_chars: bool = False, case_sensitive: bool = False):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError('''Sacrebleu requires the same number of references for each prediction''')
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized, no_punct=ignore_punct, asian_support=support_zh_ja_chars, case_sensitive=case_sensitive)
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 256 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein):
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch):
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
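# Hedged usage sketch (not part of the original module): a minimal `protein`
# feature dict only needs an integer "aatype" tensor of shape [num_res].
if __name__ == "__main__":
    demo_protein = {"aatype": torch.zeros(4, dtype=torch.long)}
    demo_protein = make_atom14_masks(demo_protein)
    print(demo_protein["residx_atom14_to_atom37"].shape)  # torch.Size([4, 14])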
| 275 | 0 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size):
    """Create a size x size canvas of dead cells."""
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas):
    """Randomly seed each cell of the canvas as alive or dead."""
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas):
    """Advance the canvas by one generation and return it as a list."""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas = current_canvas.tolist()
    return return_canvas


def __judge_point(pt, neighbours):
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state
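# Hedged mini-check (not in the original script): a live focus cell whose 3x3
# window holds two other live cells survives under the rules implemented above.
assert __judge_point(
    True, [[True, False, False], [True, True, False], [False, False, False]]
)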
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
A = int(sys.argv[1])
# main working structure of this module.
A = create_canvas(canvas_size)
seed(c)
A , A = plt.subplots()
fig.show()
A = ListedColormap(['w', 'k'])
try:
while True:
A = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass | 362 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 33 | 0 |
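# Note on the lazy-import pattern above (a hedged sketch, not from the original
# file): replacing `sys.modules[__name__]` with a `_LazyModule` defers the heavy
# torch-backed imports until an attribute is first accessed, e.g.
#
#   from transformers.models.instructblip import InstructBlipProcessor   # cheap
#   from transformers.models.instructblip import (
#       InstructBlipForConditionalGeneration,   # resolves the torch module lazily
#   )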
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = 'esm'

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('''No esmfold_config supplied for folding model, using default values.''')
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''')
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, '''use_esm_attn_map''', False):
            raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''')

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''')
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                F''' {self.sequence_state_dim} and {self.sequence_head_width}.''')
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                F''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''')

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got'''
                F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''')
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got'''
                F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''')
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''')

        if self.dropout >= 0.4:
            raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''')

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
    """Returns the default ESM-2 vocabulary as a tuple of token strings."""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
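# Hedged usage sketch (not part of the original file): constructing a small
# folding-model config exercises the nested dataclasses defined above.
if __name__ == "__main__":
    demo_config = EsmConfig(vocab_size=33, is_folding_model=True)
    print(type(demo_config.esmfold_config.trunk).__name__)  # TrunkConfig
    print(len(demo_config.vocab_list))  # 33 tokens in the default ESM-2 vocabulary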
| 94 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
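# Hedged usage sketch: downstream modules would call `dep_version_check` at
# import time to validate a single pinned dependency, e.g.
#
#   dep_version_check("tqdm")   # raises if the installed tqdm violates the pin in `deps`
#   dep_version_check("numpy", "pip install -U numpy")   # optional hint shown on failure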
| 94 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Dict =logging.get_logger(__name__)
_lowercase : Any ={
"google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class SwitchTransformersConfig(PretrainedConfig):
    """Configuration class for the Switch Transformers model."""

    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(F'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''')
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("""-""")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == """gated"""

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
                """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
                """'gated-gelu' or 'relu'""")

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = """gelu_new"""

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs, )
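# Hedged usage sketch (not from the original file): the sparse-step values
# derived above mean every `encoder_sparse_step`-th encoder block becomes an
# expert (MoE) layer.
if __name__ == "__main__":
    demo_config = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
    print(demo_config.encoder_sparse_step)  # 4 -> one sparse layer every 4 blocks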
| 350 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attribute layout expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("""\033[?25l""")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("""\033[?25h""")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager to hide the terminal cursor while a block runs."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
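# Hedged usage sketch: `hide()` is the context-manager entry point, restoring
# the cursor even if the wrapped block raises.
if __name__ == "__main__":
    with hide():
        print("the terminal cursor is hidden while this block runs")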
| 266 | 0 |
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
'''simple docstring'''
def __init__( self : str ):
"""simple docstring"""
        self.graph = {}
def snake_case__ ( self : List[str] , __lowercase : List[Any] , __lowercase : Tuple , __lowercase : Optional[int]=1 ):
"""simple docstring"""
if self.graph.get(__lowercase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
            self.graph[u] = [[w, v]]
if not self.graph.get(__lowercase ):
            self.graph[v] = []
def snake_case__ ( self : List[Any] ):
"""simple docstring"""
return list(self.graph )
def snake_case__ ( self : List[Any] , __lowercase : Tuple , __lowercase : Union[str, Any] ):
"""simple docstring"""
if self.graph.get(__lowercase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__lowercase )
def snake_case__ ( self : Union[str, Any] , __lowercase : str=-2 , __lowercase : Optional[int]=-1 ):
"""simple docstring"""
if s == d:
return []
snake_case_ = []
snake_case_ = []
if s == -2:
snake_case_ = list(self.graph )[0]
stack.append(__lowercase )
visited.append(__lowercase )
snake_case_ = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__lowercase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__lowercase ) != 0:
snake_case_ = stack[len(__lowercase ) - 1]
else:
snake_case_ = ss
            # check if we have reached the starting point
if len(__lowercase ) == 0:
return visited
def snake_case__ ( self : List[str] , __lowercase : Any=-1 ):
"""simple docstring"""
if c == -1:
snake_case_ = floor(random() * 1_00_00 ) + 10
for i in range(__lowercase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_02 ) + 1 ):
snake_case_ = floor(random() * c ) + 1
if n != i:
self.add_pair(__lowercase , __lowercase , 1 )
def snake_case__ ( self : Dict , __lowercase : Union[str, Any]=-2 ):
"""simple docstring"""
snake_case_ = deque()
snake_case_ = []
if s == -2:
snake_case_ = list(self.graph )[0]
d.append(__lowercase )
visited.append(__lowercase )
while d:
snake_case_ = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def snake_case__ ( self : List[str] , __lowercase : List[Any] ):
"""simple docstring"""
snake_case_ = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def snake_case__ ( self : Union[str, Any] , __lowercase : str ):
"""simple docstring"""
return len(self.graph[u] )
def snake_case__ ( self : List[str] , __lowercase : List[str]=-2 ):
"""simple docstring"""
snake_case_ = []
snake_case_ = []
if s == -2:
snake_case_ = list(self.graph )[0]
stack.append(__lowercase )
visited.append(__lowercase )
snake_case_ = s
snake_case_ = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(__lowercase ) != 0:
snake_case_ = stack[len(__lowercase ) - 1]
else:
snake_case_ = ss
            # check if we have reached the starting point
if len(__lowercase ) == 0:
return sorted_nodes
def snake_case__ ( self : Union[str, Any] ):
"""simple docstring"""
snake_case_ = []
snake_case_ = []
snake_case_ = list(self.graph )[0]
stack.append(__lowercase )
visited.append(__lowercase )
snake_case_ = -2
snake_case_ = []
snake_case_ = s
snake_case_ = False
snake_case_ = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ = len(__lowercase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ = True
if len(__lowercase ) != 0:
snake_case_ = stack[len(__lowercase ) - 1]
else:
snake_case_ = False
indirect_parents.append(__lowercase )
snake_case_ = s
snake_case_ = ss
            # check if we have reached the starting point
if len(__lowercase ) == 0:
return list(__lowercase )
def snake_case__ ( self : int ):
"""simple docstring"""
snake_case_ = []
snake_case_ = []
snake_case_ = list(self.graph )[0]
stack.append(__lowercase )
visited.append(__lowercase )
snake_case_ = -2
snake_case_ = []
snake_case_ = s
snake_case_ = False
snake_case_ = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ = len(__lowercase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ = True
if len(__lowercase ) != 0:
snake_case_ = stack[len(__lowercase ) - 1]
else:
snake_case_ = False
indirect_parents.append(__lowercase )
snake_case_ = s
snake_case_ = ss
            # check if we have reached the starting point
if len(__lowercase ) == 0:
return False
def snake_case__ ( self : Tuple , __lowercase : Tuple=-2 , __lowercase : Union[str, Any]=-1 ):
"""simple docstring"""
snake_case_ = time()
self.dfs(__lowercase , __lowercase )
snake_case_ = time()
return end - begin
def snake_case__ ( self : Optional[Any] , __lowercase : List[Any]=-2 ):
"""simple docstring"""
snake_case_ = time()
self.bfs(__lowercase )
snake_case_ = time()
return end - begin
class Graph:
'''simple docstring'''
def __init__( self : Union[str, Any] ):
"""simple docstring"""
        self.graph = {}
def snake_case__ ( self : List[Any] , __lowercase : int , __lowercase : str , __lowercase : Optional[Any]=1 ):
"""simple docstring"""
if self.graph.get(__lowercase ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
            self.graph[u] = [[w, v]]
# add the other way
if self.graph.get(__lowercase ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
            self.graph[v] = [[w, u]]
def snake_case__ ( self : Union[str, Any] , __lowercase : List[str] , __lowercase : List[Any] ):
"""simple docstring"""
if self.graph.get(__lowercase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__lowercase )
# the other way round
if self.graph.get(__lowercase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(__lowercase )
def snake_case__ ( self : List[Any] , __lowercase : List[str]=-2 , __lowercase : Tuple=-1 ):
"""simple docstring"""
if s == d:
return []
snake_case_ = []
snake_case_ = []
if s == -2:
snake_case_ = list(self.graph )[0]
stack.append(__lowercase )
visited.append(__lowercase )
snake_case_ = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__lowercase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__lowercase ) != 0:
snake_case_ = stack[len(__lowercase ) - 1]
else:
snake_case_ = ss
            # check if we have reached the starting point
if len(__lowercase ) == 0:
return visited
def snake_case__ ( self : str , __lowercase : Dict=-1 ):
"""simple docstring"""
if c == -1:
snake_case_ = floor(random() * 1_00_00 ) + 10
for i in range(__lowercase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_02 ) + 1 ):
snake_case_ = floor(random() * c ) + 1
if n != i:
self.add_pair(__lowercase , __lowercase , 1 )
def snake_case__ ( self : Union[str, Any] , __lowercase : Any=-2 ):
"""simple docstring"""
snake_case_ = deque()
snake_case_ = []
if s == -2:
snake_case_ = list(self.graph )[0]
d.append(__lowercase )
visited.append(__lowercase )
while d:
snake_case_ = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def snake_case__ ( self : str , __lowercase : List[str] ):
"""simple docstring"""
return len(self.graph[u] )
def snake_case__ ( self : Tuple ):
"""simple docstring"""
snake_case_ = []
snake_case_ = []
snake_case_ = list(self.graph )[0]
stack.append(__lowercase )
visited.append(__lowercase )
snake_case_ = -2
snake_case_ = []
snake_case_ = s
snake_case_ = False
snake_case_ = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ = len(__lowercase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ = True
if len(__lowercase ) != 0:
snake_case_ = stack[len(__lowercase ) - 1]
else:
snake_case_ = False
indirect_parents.append(__lowercase )
snake_case_ = s
snake_case_ = ss
            # check if we have reached the starting point
if len(__lowercase ) == 0:
return list(__lowercase )
def snake_case__ ( self : Any ):
"""simple docstring"""
snake_case_ = []
snake_case_ = []
snake_case_ = list(self.graph )[0]
stack.append(__lowercase )
visited.append(__lowercase )
snake_case_ = -2
snake_case_ = []
snake_case_ = s
snake_case_ = False
snake_case_ = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ = len(__lowercase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ = True
if len(__lowercase ) != 0:
snake_case_ = stack[len(__lowercase ) - 1]
else:
snake_case_ = False
indirect_parents.append(__lowercase )
snake_case_ = s
snake_case_ = ss
            # check if we have reached the starting point
if len(__lowercase ) == 0:
return False
def snake_case__ ( self : Tuple ):
"""simple docstring"""
return list(self.graph )
def snake_case__ ( self : int , __lowercase : Optional[Any]=-2 , __lowercase : Dict=-1 ):
"""simple docstring"""
snake_case_ = time()
self.dfs(__lowercase , __lowercase )
snake_case_ = time()
return end - begin
def snake_case__ ( self : Optional[int] , __lowercase : List[Any]=-2 ):
"""simple docstring"""
snake_case_ = time()
self.bfs(__lowercase )
snake_case_ = time()
return end - begin
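# Hedged usage sketch (comments only, since the method identifiers above are
# obfuscated; the conventional names `add_pair`, `dfs` and `bfs` are assumed):
#
#   dg = DirectedGraph()
#   dg.add_pair(0, 1)
#   dg.add_pair(1, 2)
#   dg.dfs(0, 2)   # -> [0, 1, 2]
#   dg.bfs(0)      # breadth-first visit order from node 0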
| 187 |
def solution():
    '''Returns the product of the digits d1, d10, d100, ..., d1000000 of the
    fractional part of the Champernowne constant.'''
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
if __name__ == "__main__":
print(solution())
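# Worked check (comments only): the digit string starts "123456789101112...",
# so d1 = 1, d10 = 1 (the first digit of 10) and d100 = 5 (inside 55); the
# solution multiplies d1 * d10 * d100 * d1000 * d10000 * d100000 * d1000000.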
| 187 | 1 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            """num_train_timesteps""": 1000,
            """variance_type""": """fixed_small_log""",
            """clip_sample""": True,
            """clip_sample_range""": 1.0,
            """prediction_type""": """epsilon""",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)
    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="""fixed_small_log""")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000E-10)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1E-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="""learned_range""")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1E-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1E-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1E-5
    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1E-2
        assert abs(result_mean.item() - 0.3284743) < 1E-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1E-2
        assert abs(result_mean.item() - 0.3362038) < 1E-3

    def test_full_loop_device(self):
        pass

    def test_add_noise_device(self):
        pass
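    # Hedged note (not part of the original file): with the `test_*` names
    # restored above, pytest can discover and filter these cases, e.g.
    #
    #   pytest test_scheduler_unclip.py -k "variance"   # file name is an assumption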
| 359 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("""depth_multiplier must be greater than zero.""")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("""pixel_values""", {0: """batch"""})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("""logits""", {0: """batch"""})])
        else:
            return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})])

    @property
    def atol_for_validation(self) -> float:
        return 1E-4
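# Hedged usage sketch (not from the original file): the ONNX config above
# drives dynamic-axis naming during export.
if __name__ == "__main__":
    cfg = MobileNetV1Config(depth_multiplier=0.75)
    onnx_cfg = MobileNetV1OnnxConfig(cfg)
    print(onnx_cfg.inputs)  # OrderedDict([('pixel_values', {0: 'batch'})])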
| 125 | 0 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    '''
    An adapter that, by default, logs only on the main process; pass
    `main_process_only=False` to a log call to log on every process.
    '''

    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                '''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''')
        main_process_only = kwargs.pop('''main_process_only''', True)
        in_order = kwargs.pop('''in_order''', False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)

            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None) -> MultiProcessAdapter:
    if log_level is None:
        log_level = os.environ.get('''ACCELERATE_LOG_LEVEL''', None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
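# Hedged usage sketch: typical call pattern inside an accelerate-powered script.
if __name__ == "__main__":
    logger = get_logger(__name__, log_level="INFO")
    # The calls below additionally require the accelerate state to be
    # initialized first (e.g. by constructing `Accelerator()`):
    #   logger.info("printed once, on the main process only")
    #   logger.info("printed on every process", main_process_only=False)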
| 45 |
'''simple docstring'''
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """Returns the least row length n for which the number of fill
    combinations with blocks of length >= min_block_length exceeds one million."""
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n
if __name__ == "__main__":
print(f"""{solution() = }""")
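# Worked example (comments only): with blocks of length >= 3 separated by at
# least one grey square, a row of length n=7 can be filled in exactly 17 ways
# (Project Euler 114's example); the loop above applies the same recurrence
# with blocks of length >= 50 and returns the least row length whose fill
# count first exceeds one million.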
| 267 | 0 |
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('''.'''):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''')
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == '''group''', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('''w2v_model.''')[-1] == name.split('''.''')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('''.''')[-2]
                        mapped_key = mapped_key.replace('''*''', layer_index)
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(F'''Unused weights: {unused_weights}''')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('''conv_layers.''')[-1]
    items = name.split('''.''')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint['''cfg'''])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint['''model'''])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
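# Hedged CLI sketch (the script filename and paths are placeholders):
#
#   python convert_wavlm_checkpoint.py \
#       --checkpoint_path ./WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-converted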
| 86 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
    _torch_available = True
except ImportError:
    _torch_available = False
try:
from torch.hub import _get_torch_home
    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
    )
default_cache_path = os.path.join(torch_cache_home, 'transformers')

CLOUDFRONT_DISTRIB_PREFIX = 'https://cdn.huggingface.co'
S3_BUCKET_PREFIX = 'https://s3.amazonaws.com/models.huggingface.co/bert'
PATH = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
CONFIG = os.path.join(PATH, 'config.yaml')
ATTRIBUTES = os.path.join(PATH, 'attributes.txt')
OBJECTS = os.path.join(PATH, 'objects.txt')
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = 'pytorch_model.bin'
CONFIG_NAME = 'config.yaml'
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(''',''')[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(''',''')[0].lower().strip())
    return vg_classes, vg_attrs
def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, '''rb''') as f:
        ckp = pkl.load(f)['''model''']
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class _UpperCAmelCase :
a__ : Tuple = {}
def __init__( self : List[str] , _lowercase : dict , _lowercase : str = "root" , _lowercase : Optional[Any]=0 ):
__UpperCAmelCase = name
__UpperCAmelCase = level
__UpperCAmelCase = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__UpperCAmelCase = copy.deepcopy(_lowercase )
__UpperCAmelCase = copy.deepcopy(_lowercase )
if isinstance(_lowercase , _lowercase ):
__UpperCAmelCase = Config(_lowercase , name=_lowercase , level=level + 1 )
__UpperCAmelCase = v
setattr(self , _lowercase , _lowercase )
__UpperCAmelCase = d
def __repr__( self : Any ):
return str(list((self._pointer.keys()) ) )
def __setattr__( self : Optional[Any] , _lowercase : Optional[Any] , _lowercase : Dict ):
__UpperCAmelCase = val
__UpperCAmelCase = val
__UpperCAmelCase = key.split('''.''' )
__UpperCAmelCase = len(_lowercase ) - 1
__UpperCAmelCase = self._pointer
if len(_lowercase ) > 1:
for i, l in enumerate(_lowercase ):
if hasattr(self , _lowercase ) and isinstance(getattr(self , _lowercase ) , _lowercase ):
setattr(getattr(self , _lowercase ) , '''.'''.join(levels[i:] ) , _lowercase )
if l == last_level:
__UpperCAmelCase = val
else:
__UpperCAmelCase = pointer[l]
def a ( self : int ):
return self._pointer
def a ( self : List[str] , _lowercase : Dict , _lowercase : str ):
with open(F'''{file_name}''' , '''w''' ) as stream:
dump(_lowercase , _lowercase )
def a ( self : int , _lowercase : Dict , _lowercase : Tuple ):
with open(F'''{file_name}''' , '''w''' ) as stream:
json.dump(_lowercase , _lowercase )
@staticmethod
def a ( _lowercase : str ):
with open(_lowercase ) as stream:
__UpperCAmelCase = load(_lowercase , Loader=_lowercase )
return data
def __str__( self : Dict ):
__UpperCAmelCase = ''' '''
if self._name != "root":
__UpperCAmelCase = F'''{t * (self._level-1)}{self._name}:\n'''
else:
__UpperCAmelCase = ''''''
__UpperCAmelCase = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(_lowercase , _lowercase ):
r += F'''{t * (self._level)}{v}\n'''
self._level += 1
else:
r += F'''{t * (self._level)}{k}: {v} ({type(_lowercase ).__name__})\n'''
__UpperCAmelCase = level
return r[:-1]
@classmethod
def a ( cls : str , _lowercase : str , **_lowercase : Any ):
__UpperCAmelCase , __UpperCAmelCase = cls.get_config_dict(_lowercase , **_lowercase )
return cls(_lowercase )
@classmethod
def a ( cls : Any , _lowercase : str , **_lowercase : str ):
__UpperCAmelCase = kwargs.pop('''cache_dir''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''force_download''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''resume_download''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''proxies''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''local_files_only''' , _lowercase )
if os.path.isdir(_lowercase ):
__UpperCAmelCase = os.path.join(_lowercase , _lowercase )
elif os.path.isfile(_lowercase ) or is_remote_url(_lowercase ):
__UpperCAmelCase = pretrained_model_name_or_path
else:
__UpperCAmelCase = hf_bucket_url(_lowercase , filename=_lowercase , use_cdn=_lowercase )
try:
# Load from URL or cache if already cached
__UpperCAmelCase = cached_path(
_lowercase , cache_dir=_lowercase , force_download=_lowercase , proxies=_lowercase , resume_download=_lowercase , local_files_only=_lowercase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__UpperCAmelCase = Config.load_yaml(_lowercase )
except EnvironmentError:
__UpperCAmelCase = '''Can\'t load config for'''
raise EnvironmentError(_lowercase )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(_lowercase ), kwargs
def compare(in_tensor):
    out_tensor = torch.load('''dump.pt''', map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        F'''{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False])/len(n1.flatten())*100:.4f} %'''
        " element-wise mismatch"
    )
    raise Exception('''tensors are all good''')
# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id, filename, use_cdn=True):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = '''/''' not in model_id
    if legacy_format:
        return F'''{endpoint}/{model_id}-{filename}'''
    else:
        return F'''{endpoint}/{model_id}/{filename}'''
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = '''python/{}'''.format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join('''{}/{}'''.format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {'''user-agent''': ua}
    if resume_size > 0:
        headers['''Range'''] = '''bytes=%d-''' % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get('''Content-Length''')
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit='''B''', unit_scale=True, total=total, initial=resume_size, desc='''Downloading''', )
    for chunk in response.iter_content(chunk_size=1_024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :str=None , snake_case_ :Optional[int]=False , snake_case_ :List[Any]=None , snake_case_ :List[Any]=10 , snake_case_ :Optional[int]=False , snake_case_ :List[str]=None , snake_case_ :Union[str, Any]=False , ):
if cache_dir is None:
__UpperCAmelCase = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__UpperCAmelCase = str(snake_case_ )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
__UpperCAmelCase = None
if not local_files_only:
try:
__UpperCAmelCase = requests.head(snake_case_ , allow_redirects=snake_case_ , proxies=snake_case_ , timeout=snake_case_ )
if response.status_code == 200:
__UpperCAmelCase = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__UpperCAmelCase = url_to_filename(snake_case_ , snake_case_ )
# get cache path to put the file
__UpperCAmelCase = os.path.join(snake_case_ , snake_case_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(snake_case_ ):
return cache_path
else:
__UpperCAmelCase = [
file
for file in fnmatch.filter(os.listdir(snake_case_ ) , filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(snake_case_ ) > 0:
return os.path.join(snake_case_ , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(snake_case_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__UpperCAmelCase = cache_path + '''.lock'''
with FileLock(snake_case_ ):
# If the download just completed while the lock was activated.
if os.path.exists(snake_case_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__UpperCAmelCase = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(snake_case_ , '''a+b''' ) as f:
yield f
__UpperCAmelCase = _resumable_file_manager
if os.path.exists(snake_case_ ):
__UpperCAmelCase = os.stat(snake_case_ ).st_size
else:
__UpperCAmelCase = 0
else:
__UpperCAmelCase = partial(tempfile.NamedTemporaryFile , dir=snake_case_ , delete=snake_case_ )
__UpperCAmelCase = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' , snake_case_ , temp_file.name , )
http_get(
snake_case_ , snake_case_ , proxies=snake_case_ , resume_size=snake_case_ , user_agent=snake_case_ , )
os.replace(temp_file.name , snake_case_ )
__UpperCAmelCase = {'''url''': url, '''etag''': etag}
__UpperCAmelCase = cache_path + '''.json'''
with open(snake_case_ , '''w''' ) as meta_file:
json.dump(snake_case_ , snake_case_ )
return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode('''utf-8''')
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode('''utf-8''')
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith('''.h5'''):
        filename += ".h5"
    return filename
def lowercase__ ( snake_case_ :Dict , snake_case_ :List[Any]=None , snake_case_ :List[Any]=False , snake_case_ :Optional[int]=None , snake_case_ :List[Any]=False , snake_case_ :Optional[Any]=None , snake_case_ :Any=False , snake_case_ :int=False , snake_case_ :Optional[int]=False , ):
if cache_dir is None:
__UpperCAmelCase = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__UpperCAmelCase = str(snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
__UpperCAmelCase = str(snake_case_ )
if is_remote_url(snake_case_ ):
# URL, so get it from the cache (downloading if necessary)
__UpperCAmelCase = get_from_cache(
snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , proxies=snake_case_ , resume_download=snake_case_ , user_agent=snake_case_ , local_files_only=snake_case_ , )
elif os.path.exists(snake_case_ ):
# File, and it exists.
__UpperCAmelCase = url_or_filename
elif urlparse(snake_case_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(snake_case_ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(snake_case_ ) )
if extract_compressed_file:
if not is_zipfile(snake_case_ ) and not tarfile.is_tarfile(snake_case_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__UpperCAmelCase , __UpperCAmelCase = os.path.split(snake_case_ )
__UpperCAmelCase = output_file.replace('''.''' , '''-''' ) + '''-extracted'''
__UpperCAmelCase = os.path.join(snake_case_ , snake_case_ )
if os.path.isdir(snake_case_ ) and os.listdir(snake_case_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__UpperCAmelCase = output_path + '''.lock'''
with FileLock(snake_case_ ):
shutil.rmtree(snake_case_ , ignore_errors=snake_case_ )
os.makedirs(snake_case_ )
if is_zipfile(snake_case_ ):
with ZipFile(snake_case_ , '''r''' ) as zip_file:
zip_file.extractall(snake_case_ )
zip_file.close()
elif tarfile.is_tarfile(snake_case_ ):
__UpperCAmelCase = tarfile.open(snake_case_ )
tar_file.extractall(snake_case_ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(snake_case_ ) )
return output_path_extracted
return output_path
def lowercase__ ( snake_case_ :List[Any] , snake_case_ :List[Any]="," ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
with open(snake_case_ ) as f:
__UpperCAmelCase = eval(f.read() )
else:
__UpperCAmelCase = requests.get(snake_case_ )
try:
            __UpperCAmelCase = req.json()
except Exception:
__UpperCAmelCase = req.content.decode()
assert data is not None, "could not connect"
try:
__UpperCAmelCase = eval(snake_case_ )
except Exception:
__UpperCAmelCase = data.split('''\n''' )
req.close()
return data
def lowercase__ ( snake_case_ :Union[str, Any] ):
__UpperCAmelCase = requests.get(snake_case_ )
__UpperCAmelCase = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowercase__ ( snake_case_ :List[str] ):
__UpperCAmelCase = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(snake_case_ )
with open(snake_case_ , '''rb''' ) as stream:
__UpperCAmelCase = pkl.load(snake_case_ )
__UpperCAmelCase = weights.pop('''model''' )
__UpperCAmelCase = {}
for k, v in model.items():
__UpperCAmelCase = torch.from_numpy(snake_case_ )
if "running_var" in k:
__UpperCAmelCase = torch.tensor([0] )
__UpperCAmelCase = k.replace('''running_var''' , '''num_batches_tracked''' )
__UpperCAmelCase = zero
return new
def lowercase__ ( ):
print(F'''{os.path.abspath(os.path.join(snake_case_ , os.pardir ) )}/demo.ipynb''' )
def lowercase__ ( snake_case_ :Tuple , snake_case_ :Tuple="RGB" ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
__UpperCAmelCase = cva.imread(snake_case_ )
else:
__UpperCAmelCase = get_image_from_url(snake_case_ )
    assert img is not None, F'''could not connect to: {snake_case_}'''
__UpperCAmelCase = cva.cvtColor(snake_case_ , cva.COLOR_BGR2RGB )
if input_format == "RGB":
__UpperCAmelCase = img[:, :, ::-1]
return img
def lowercase__ ( snake_case_ :Any , snake_case_ :int=1 ):
return (images[i : i + batch] for i in range(0 , len(snake_case_ ) , snake_case_ ))
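# Readable equivalent of the one-line generator above, plus a tiny usage check
# (names illustrative): it lazily yields the list in consecutive slices.
def _chunks(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))

assert list(_chunks([1, 2, 3, 4, 5], batch=2)) == [[1, 2], [3, 4], [5]]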
| 86 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
A = BlenderbotSmallTokenizer
A = False
def a_ (self ) -> List[str]:
super().setUp()
__UpperCamelCase : Optional[Any] = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
__UpperCamelCase : int = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
__UpperCamelCase : Any = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
__UpperCamelCase : int = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
__UpperCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__UpperCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_UpperCAmelCase ) )
def a_ (self , **_UpperCAmelCase ) -> Dict:
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def a_ (self , _UpperCAmelCase ) -> str:
__UpperCamelCase : List[Any] = "adapt act apte"
__UpperCamelCase : Dict = "adapt act apte"
return input_text, output_text
def a_ (self ) -> int:
__UpperCamelCase : List[str] = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__UpperCamelCase : str = "adapt act apte"
__UpperCamelCase : List[str] = ["adapt", "act", "ap@@", "te"]
__UpperCamelCase : Union[str, Any] = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__UpperCamelCase : Dict = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
__UpperCamelCase : Any = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
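        # Note on the expected tokens above: "ap@@" carries BlenderbotSmall's BPE
        # continuation marker. "apte" is absent from the toy vocab, so the merges
        # split it into ["ap@@", "te"], and decoding re-joins the pieces on "@@ ".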
def a_ (self ) -> int:
__UpperCamelCase : Optional[int] = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
assert tok("sam" ).input_ids == [1_3_8_4]
__UpperCamelCase : Dict = "I am a small frog."
__UpperCamelCase : Any = tok([src_text] , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )["input_ids"]
__UpperCamelCase : Optional[Any] = tok.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def a_ (self ) -> List[Any]:
__UpperCamelCase : Dict = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
__UpperCamelCase : Tuple = "I am a small frog ."
__UpperCamelCase : List[str] = "."
__UpperCamelCase : Any = tok(_UpperCAmelCase )["input_ids"]
__UpperCamelCase : Optional[Any] = tok(_UpperCAmelCase )["input_ids"]
assert encoded[-1] == encoded_dot[0]
| 298 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def __lowerCAmelCase ( snake_case__ ):
__UpperCamelCase : Tuple = torch.exp(snake_case__ )
__UpperCamelCase : str = torch.sum(snake_case__ , dim=1 ) # sum of exp(x_i)
__UpperCamelCase : int = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(snake_case__ ) - B / A
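# The function above is the Shannon entropy (in nats) of softmax(x), computed
# without materialising the probabilities: with A = sum_i exp(x_i) and
# B = sum_i x_i * exp(x_i),
#     H(softmax(x)) = -sum_i p_i * log(p_i) = log(A) - B / A.
# A quick numerical cross-check of that identity (torch is imported above):
def _entropy_readable(x):
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)
    B = torch.sum(x * exp_x, dim=1)
    return torch.log(A) - B / A

_logits = torch.randn(4, 10)
_probs = torch.softmax(_logits, dim=1)
assert torch.allclose(_entropy_readable(_logits), -(_probs * _probs.log()).sum(dim=1), atol=1e-4)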
class A ( nn.Module ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase ) -> Union[str, Any]:
super().__init__()
__UpperCamelCase : Any = config.output_attentions
__UpperCamelCase : Dict = config.output_hidden_states
__UpperCamelCase : Union[str, Any] = nn.ModuleList([BertLayer(_UpperCAmelCase ) for _ in range(config.num_hidden_layers )] )
__UpperCamelCase : Tuple = nn.ModuleList([BertHighway(_UpperCAmelCase ) for _ in range(config.num_hidden_layers )] )
__UpperCamelCase : Optional[int] = [-1 for _ in range(config.num_hidden_layers )]
def a_ (self , _UpperCAmelCase ) -> int:
if (type(_UpperCAmelCase ) is float) or (type(_UpperCAmelCase ) is int):
for i in range(len(self.early_exit_entropy ) ):
__UpperCamelCase : str = x
else:
__UpperCamelCase : List[Any] = x
def a_ (self , _UpperCAmelCase ) -> str:
__UpperCamelCase : Tuple = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ) -> List[Any]:
__UpperCamelCase : Optional[Any] = ()
__UpperCamelCase : Tuple = ()
__UpperCamelCase : Dict = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
__UpperCamelCase : Tuple = all_hidden_states + (hidden_states,)
__UpperCamelCase : Optional[int] = layer_module(
_UpperCAmelCase , _UpperCAmelCase , head_mask[i] , _UpperCAmelCase , _UpperCAmelCase )
__UpperCamelCase : Tuple = layer_outputs[0]
if self.output_attentions:
__UpperCamelCase : Optional[Any] = all_attentions + (layer_outputs[1],)
__UpperCamelCase : Any = (hidden_states,)
if self.output_hidden_states:
__UpperCamelCase : Any = current_outputs + (all_hidden_states,)
if self.output_attentions:
__UpperCamelCase : int = current_outputs + (all_attentions,)
__UpperCamelCase : Optional[int] = self.highway[i](_UpperCAmelCase )
# logits, pooled_output
if not self.training:
__UpperCamelCase : Dict = highway_exit[0]
__UpperCamelCase : Any = entropy(_UpperCAmelCase )
__UpperCamelCase : str = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
__UpperCamelCase : Optional[Any] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
__UpperCamelCase : str = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(_UpperCAmelCase , i + 1 )
else:
__UpperCamelCase : Optional[int] = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
__UpperCamelCase : int = all_hidden_states + (hidden_states,)
__UpperCamelCase : Dict = (hidden_states,)
if self.output_hidden_states:
__UpperCamelCase : Union[str, Any] = outputs + (all_hidden_states,)
if self.output_attentions:
__UpperCamelCase : Optional[int] = outputs + (all_attentions,)
__UpperCamelCase : List[Any] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
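# The loop above is the heart of DeeBERT-style early exiting: after every layer
# an off-ramp classifier scores the running hidden state, and once the prediction
# entropy falls below that layer's threshold the remaining layers are skipped by
# raising an exception that carries the result upward. A framework-free sketch of
# the control flow (all names illustrative, not the module's own API):
class _EarlyExit(Exception):
    def __init__(self, output, layer):
        self.output, self.layer = output, layer

def _forward_with_exits(layers, ramps, thresholds, hidden, entropy_fn):
    for i, layer in enumerate(layers):
        hidden = layer(hidden)
        logits = ramps[i](hidden)
        if entropy_fn(logits) < thresholds[i]:
            raise _EarlyExit(logits, i + 1)  # confident enough: stop here
    return hidden  # fell through: a normal full-depth forward pass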
@add_start_docstrings(
"The Bert Model transformer with early exiting (DeeBERT). " , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase ) -> Dict:
super().__init__(_UpperCAmelCase )
__UpperCamelCase : Union[str, Any] = config
__UpperCamelCase : Dict = BertEmbeddings(_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = DeeBertEncoder(_UpperCAmelCase )
__UpperCamelCase : str = BertPooler(_UpperCAmelCase )
self.init_weights()
def a_ (self ) -> Any:
self.encoder.init_highway_pooler(self.pooler )
def a_ (self ) -> Optional[int]:
return self.embeddings.word_embeddings
def a_ (self , _UpperCAmelCase ) -> Dict:
__UpperCamelCase : int = value
def a_ (self , _UpperCAmelCase ) -> Tuple:
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(_UpperCAmelCase )
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def a_ (self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ) -> Union[str, Any]:
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
__UpperCamelCase : Tuple = input_ids.size()
elif inputs_embeds is not None:
__UpperCamelCase : Optional[int] = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
__UpperCamelCase : List[str] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__UpperCamelCase : int = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
if encoder_attention_mask is None:
__UpperCamelCase : Tuple = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
if token_type_ids is None:
__UpperCamelCase : Optional[Any] = torch.zeros(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__UpperCamelCase : torch.Tensor = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
__UpperCamelCase : Tuple = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
__UpperCamelCase : Any = encoder_attention_mask[:, None, None, :]
__UpperCamelCase : List[Any] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
__UpperCamelCase : Dict = (1.0 - encoder_extended_attention_mask) * -10_000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__UpperCamelCase : Dict = self.get_head_mask(_UpperCAmelCase , self.config.num_hidden_layers )
__UpperCamelCase : Optional[int] = self.embeddings(
input_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase )
__UpperCamelCase : List[Any] = self.encoder(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
__UpperCamelCase : Union[str, Any] = encoder_outputs[0]
__UpperCamelCase : Any = self.pooler(_UpperCAmelCase )
__UpperCamelCase : Union[str, Any] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
__UpperCamelCase : Tuple = message
__UpperCamelCase : Union[str, Any] = exit_layer # start from 1!
class A ( nn.Module ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase ) -> Dict:
super().__init__()
__UpperCamelCase : Union[str, Any] = BertPooler(_UpperCAmelCase )
__UpperCamelCase : int = nn.Dropout(config.hidden_dropout_prob )
__UpperCamelCase : Union[str, Any] = nn.Linear(config.hidden_size , config.num_labels )
def a_ (self , _UpperCAmelCase ) -> Any:
# Pooler
__UpperCamelCase : Optional[int] = encoder_outputs[0]
__UpperCamelCase : str = self.pooler(_UpperCAmelCase )
# "return" pooler_output
# BertModel
__UpperCamelCase : Tuple = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
__UpperCamelCase : Dict = bmodel_output[1]
__UpperCamelCase : List[Any] = self.dropout(_UpperCAmelCase )
__UpperCamelCase : Any = self.classifier(_UpperCAmelCase )
return logits, pooled_output
@add_start_docstrings(
"Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase ) -> Any:
super().__init__(_UpperCAmelCase )
__UpperCamelCase : List[Any] = config.num_labels
__UpperCamelCase : List[Any] = config.num_hidden_layers
__UpperCamelCase : Optional[int] = DeeBertModel(_UpperCAmelCase )
__UpperCamelCase : List[str] = nn.Dropout(config.hidden_dropout_prob )
__UpperCamelCase : str = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def a_ (self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=-1 , _UpperCAmelCase=False , ) -> int:
__UpperCamelCase : int = self.num_layers
try:
__UpperCamelCase : Tuple = self.bert(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
__UpperCamelCase : str = outputs[1]
__UpperCamelCase : List[Any] = self.dropout(_UpperCAmelCase )
__UpperCamelCase : Dict = self.classifier(_UpperCAmelCase )
__UpperCamelCase : Tuple = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__UpperCamelCase : int = e.message
__UpperCamelCase : Optional[Any] = e.exit_layer
__UpperCamelCase : Optional[int] = outputs[0]
if not self.training:
__UpperCamelCase : Optional[int] = entropy(_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = []
__UpperCamelCase : Any = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase : List[str] = MSELoss()
__UpperCamelCase : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
__UpperCamelCase : Dict = CrossEntropyLoss()
__UpperCamelCase : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
__UpperCamelCase : List[Any] = []
for highway_exit in outputs[-1]:
__UpperCamelCase : Union[str, Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(_UpperCAmelCase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase : Union[str, Any] = MSELoss()
__UpperCamelCase : str = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
__UpperCamelCase : Optional[Any] = CrossEntropyLoss()
__UpperCamelCase : List[str] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_UpperCAmelCase )
if train_highway:
__UpperCamelCase : int = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__UpperCamelCase : Dict = (loss,) + outputs
if not self.training:
__UpperCamelCase : Optional[int] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__UpperCamelCase : int = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 298 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : List[Any] = SwinvaConfig()
_lowerCAmelCase : Any = swinva_name.split('_' )
_lowerCAmelCase : Dict = name_split[1]
if "to" in name_split[3]:
_lowerCAmelCase : Tuple = int(name_split[3][-3:] )
else:
_lowerCAmelCase : Any = int(name_split[3] )
if "to" in name_split[2]:
_lowerCAmelCase : int = int(name_split[2][-2:] )
else:
_lowerCAmelCase : int = int(name_split[2][6:] )
if model_size == "tiny":
_lowerCAmelCase : Union[str, Any] = 9_6
_lowerCAmelCase : List[str] = (2, 2, 6, 2)
_lowerCAmelCase : str = (3, 6, 1_2, 2_4)
elif model_size == "small":
_lowerCAmelCase : str = 9_6
_lowerCAmelCase : List[str] = (2, 2, 1_8, 2)
_lowerCAmelCase : List[str] = (3, 6, 1_2, 2_4)
elif model_size == "base":
_lowerCAmelCase : int = 1_2_8
_lowerCAmelCase : Any = (2, 2, 1_8, 2)
_lowerCAmelCase : str = (4, 8, 1_6, 3_2)
else:
_lowerCAmelCase : Union[str, Any] = 1_9_2
_lowerCAmelCase : Dict = (2, 2, 1_8, 2)
_lowerCAmelCase : List[Any] = (6, 1_2, 2_4, 4_8)
if "to" in swinva_name:
_lowerCAmelCase : List[Any] = (1_2, 1_2, 1_2, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
_lowerCAmelCase : List[str] = 2_1_8_4_1
_lowerCAmelCase : List[Any] = 'huggingface/label-files'
_lowerCAmelCase : str = 'imagenet-22k-id2label.json'
_lowerCAmelCase : int = json.load(open(hf_hub_download(_A , _A , repo_type='dataset' ) , 'r' ) )
_lowerCAmelCase : Optional[Any] = {int(_A ): v for k, v in idalabel.items()}
_lowerCAmelCase : List[str] = idalabel
_lowerCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
else:
_lowerCAmelCase : List[Any] = 1_0_0_0
_lowerCAmelCase : Any = 'huggingface/label-files'
_lowerCAmelCase : Union[str, Any] = 'imagenet-1k-id2label.json'
_lowerCAmelCase : List[Any] = json.load(open(hf_hub_download(_A , _A , repo_type='dataset' ) , 'r' ) )
_lowerCAmelCase : Optional[Any] = {int(_A ): v for k, v in idalabel.items()}
_lowerCAmelCase : Tuple = idalabel
_lowerCAmelCase : Any = {v: k for k, v in idalabel.items()}
_lowerCAmelCase : int = img_size
_lowerCAmelCase : int = num_classes
_lowerCAmelCase : Dict = embed_dim
_lowerCAmelCase : Optional[Any] = depths
_lowerCAmelCase : List[Any] = num_heads
_lowerCAmelCase : List[Any] = window_size
return config
def lowercase (_A ):
"""simple docstring"""
if "patch_embed.proj" in name:
_lowerCAmelCase : List[Any] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
_lowerCAmelCase : List[str] = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
_lowerCAmelCase : str = 'encoder.' + name
if "attn.proj" in name:
_lowerCAmelCase : Optional[int] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
_lowerCAmelCase : Tuple = name.replace('attn' , 'attention.self' )
if "norm1" in name:
_lowerCAmelCase : List[Any] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
_lowerCAmelCase : List[Any] = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
_lowerCAmelCase : Any = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_lowerCAmelCase : List[Any] = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
_lowerCAmelCase : List[Any] = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
_lowerCAmelCase : List[Any] = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
_lowerCAmelCase : List[Any] = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
_lowerCAmelCase : Tuple = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if name == "norm.weight":
_lowerCAmelCase : str = 'layernorm.weight'
if name == "norm.bias":
_lowerCAmelCase : int = 'layernorm.bias'
if "head" in name:
_lowerCAmelCase : Tuple = name.replace('head' , 'classifier' )
else:
_lowerCAmelCase : Optional[int] = 'swinv2.' + name
return name
def lowercase (_A , _A ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_lowerCAmelCase : Optional[Any] = orig_state_dict.pop(_A )
if "mask" in key:
continue
elif "qkv" in key:
_lowerCAmelCase : Union[str, Any] = key.split('.' )
_lowerCAmelCase : List[str] = int(key_split[1] )
_lowerCAmelCase : Optional[Any] = int(key_split[3] )
_lowerCAmelCase : Optional[Any] = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_lowerCAmelCase : List[Any] = val[:dim, :]
_lowerCAmelCase : List[str] = val[dim : dim * 2, :]
_lowerCAmelCase : Union[str, Any] = val[-dim:, :]
else:
_lowerCAmelCase : List[Any] = val[:dim]
_lowerCAmelCase : Union[str, Any] = val[
dim : dim * 2
]
_lowerCAmelCase : Tuple = val[-dim:]
else:
_lowerCAmelCase : Union[str, Any] = val
return orig_state_dict
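# The "qkv" branch above slices timm's fused attention projection into the
# separate query/key/value tensors Transformers expects: the fused weight has
# shape (3*dim, dim) and the fused bias (3*dim,), cut row-wise into thirds.
# A tiny shape check with a hypothetical head size:
_dim = 4
_fused = torch.arange(3 * _dim * _dim, dtype=torch.float32).reshape(3 * _dim, _dim)
_q, _k, _v = _fused[:_dim, :], _fused[_dim : _dim * 2, :], _fused[-_dim:, :]
assert _q.shape == _k.shape == _v.shape == (_dim, _dim)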
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : Optional[int] = timm.create_model(_A , pretrained=_A )
timm_model.eval()
_lowerCAmelCase : Any = get_swinva_config(_A )
_lowerCAmelCase : Union[str, Any] = SwinvaForImageClassification(_A )
model.eval()
_lowerCAmelCase : Any = convert_state_dict(timm_model.state_dict() , _A )
model.load_state_dict(_A )
_lowerCAmelCase : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowerCAmelCase : Tuple = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swinva_name.replace('_' , '-' ) ) )
_lowerCAmelCase : List[Any] = Image.open(requests.get(_A , stream=_A ).raw )
_lowerCAmelCase : int = image_processor(images=_A , return_tensors='pt' )
_lowerCAmelCase : Any = timm_model(inputs['pixel_values'] )
_lowerCAmelCase : Dict = model(**_A ).logits
assert torch.allclose(_A , _A , atol=1E-3 )
print(f'Saving model {swinva_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_A )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_A )
model.push_to_hub(
repo_path_or_name=Path(_A , _A ) , organization='nandwalritik' , commit_message='Add model' , )
if __name__ == "__main__":
lowerCAmelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swinv2_name""",
default="""swinv2_tiny_patch4_window8_256""",
type=str,
help="""Name of the Swinv2 timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowerCAmelCase : str = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 361 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : str = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : List[str] = {
"""roberta-base""": 5_12,
"""roberta-large""": 5_12,
"""roberta-large-mnli""": 5_12,
"""distilroberta-base""": 5_12,
"""roberta-base-openai-detector""": 5_12,
"""roberta-large-openai-detector""": 5_12,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = ["input_ids", "attention_mask"]
__magic_name__ = RobertaTokenizer
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="replace" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
_lowerCAmelCase : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
_lowerCAmelCase : Tuple = getattr(snake_case__ , pre_tok_state.pop('type' ) )
_lowerCAmelCase : List[Any] = add_prefix_space
_lowerCAmelCase : List[str] = pre_tok_class(**snake_case__ )
_lowerCAmelCase : Union[str, Any] = add_prefix_space
_lowerCAmelCase : Union[str, Any] = 'post_processor'
_lowerCAmelCase : int = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
if tokenizer_component_instance:
_lowerCAmelCase : Dict = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_lowerCAmelCase : Any = tuple(state['sep'] )
if "cls" in state:
_lowerCAmelCase : str = tuple(state['cls'] )
_lowerCAmelCase : List[str] = False
if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
_lowerCAmelCase : int = add_prefix_space
_lowerCAmelCase : Tuple = True
if state.get('trim_offsets' , snake_case__ ) != trim_offsets:
_lowerCAmelCase : Union[str, Any] = trim_offsets
_lowerCAmelCase : Optional[int] = True
if changes_to_apply:
_lowerCAmelCase : Any = getattr(snake_case__ , state.pop('type' ) )
_lowerCAmelCase : Optional[int] = component_class(**snake_case__ )
setattr(self.backend_tokenizer , snake_case__ , snake_case__ )
@property
def a ( self ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : str = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
_lowerCAmelCase : Tuple = value
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = kwargs.get('is_split_into_words' , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = kwargs.get('is_split_into_words' , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*snake_case__ , **snake_case__ )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : int = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def a ( self , snake_case__ , snake_case__=None ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
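# Worth making explicit from the two methods above: RoBERTa formats a pair as
# <s> A </s></s> B </s>, and its token_type_ids are always zeros because the
# model was pretrained without segment embeddings. With RoBERTa's usual
# special-token ids (<s> = 0, </s> = 2; the content ids are placeholders):
_bos, _eos = 0, 2
_pair = [_bos] + [10, 11] + [_eos] + [_eos] + [12] + [_eos]
assert _pair == [0, 10, 11, 2, 2, 12, 2]
assert len(_pair) * [0] == [0, 0, 0, 0, 0, 0, 0]  # the all-zero segment ids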
| 25 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
__a : Tuple = ['''image_processor''', '''tokenizer''']
__a : List[str] = '''CLIPImageProcessor'''
__a : List[str] = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''')
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowerCAmelCase__ , )
__lowercase = kwargs.pop('''feature_extractor''' )
__lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
def __call__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__lowercase = self.tokenizer(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
if images is not None:
__lowercase = self.image_processor(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
if text is not None and images is not None:
__lowercase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase__ ) , tensor_type=lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.tokenizer.model_input_names
__lowercase = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 210 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a : Optional[int] = logging.get_logger(__name__)
__a : Tuple = {
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class _UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
__a : Dict = '''data2vec-text'''
def __init__( self , lowerCAmelCase__=3_05_22 , lowerCAmelCase__=7_68 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__=30_72 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_12 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=2 , lowerCAmelCase__="absolute" , lowerCAmelCase__=True , lowerCAmelCase__=None , **lowerCAmelCase__ , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = use_cache
__lowercase = classifier_dropout
class _UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
@property
def _SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
__lowercase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__lowercase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
            ] )
| 210 | 1 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_UpperCAmelCase : str = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def a__ ( lowercase : int, lowercase : Dict, lowercase : List[str], lowercase : int=None ) -> int:
"""simple docstring"""
_UpperCamelCase = XLNetConfig.from_json_file(lowercase )
_UpperCamelCase = finetuning_task.lower() if finetuning_task is not None else ''''''
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
_UpperCamelCase = finetuning_task
_UpperCamelCase = GLUE_TASKS_NUM_LABELS[finetuning_task]
_UpperCamelCase = XLNetForSequenceClassification(lowercase )
elif "squad" in finetuning_task:
_UpperCamelCase = finetuning_task
_UpperCamelCase = XLNetForQuestionAnswering(lowercase )
else:
_UpperCamelCase = XLNetLMHeadModel(lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(lowercase, lowercase, lowercase )
# Save pytorch-model
_UpperCamelCase = os.path.join(lowercase, lowercase )
_UpperCamelCase = os.path.join(lowercase, lowercase )
print(F"""Save PyTorch model to {os.path.abspath(lowercase )}""" )
torch.save(model.state_dict(), lowercase )
print(F"""Save configuration file to {os.path.abspath(lowercase )}""" )
with open(lowercase, '''w''', encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
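# Usage sketch for the task table at the top of this script: the converter
# sizes the classification head directly from the task name, e.g.
#     GLUE_TASKS_NUM_LABELS['mnli'] == 3    # entailment / neutral / contradiction
#     GLUE_TASKS_NUM_LABELS['sts-b'] == 1   # a single regression target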
if __name__ == "__main__":
_UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
_UpperCAmelCase : int = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 353 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
lowercase__ : List[str] = sys.version_info >= (3, 10)
def a__ ( lowercase : Dict=None, lowercase : List[str]=None ) -> List[Any]:
"""simple docstring"""
return field(default_factory=lambda: default, metadata=lowercase )
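# The helper above exists because dataclasses reject mutable class-level defaults
# outright (``xs: List[int] = []`` raises ValueError when the class is created);
# routing the value through ``default_factory`` satisfies that check. Note the
# lambda returns the *same* ``default`` object on every call, so the helper
# bypasses the check rather than copying. A quick demo of the underlying
# mechanism with a per-call fresh list:
@dataclass
class _ListFieldDemo:
    xs: List[int] = field(default_factory=lambda: [1, 2])

assert _ListFieldDemo().xs == [1, 2]
assert _ListFieldDemo(xs=[9]).xs == [9]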
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
_snake_case : int
_snake_case : float
_snake_case : str
_snake_case : bool
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
_snake_case : int = 4_2
_snake_case : str = field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
_snake_case : bool = False
_snake_case : bool = True
_snake_case : Optional[bool] = None
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : str = 'titi'
_snake_case : Union[str, Any] = 'toto'
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : str = 'titi'
_snake_case : Union[str, Any] = 'toto'
_snake_case : Any = 4_2
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
_snake_case : BasicEnum = "toto"
def snake_case__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = BasicEnum(self.foo )
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
_snake_case : MixedTypeEnum = "toto"
def snake_case__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = MixedTypeEnum(self.foo )
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
_snake_case : Optional[int] = None
_snake_case : Optional[float] = field(default=__magic_name__ , metadata={'help': 'help message'} )
_snake_case : Optional[str] = None
_snake_case : Optional[List[str]] = list_field(default=[] )
_snake_case : Optional[List[int]] = list_field(default=[] )
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
_snake_case : List[int] = list_field(default=[] )
_snake_case : List[int] = list_field(default=[1, 2, 3] )
_snake_case : List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
_snake_case : List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
_snake_case : List[int] = field()
_snake_case : str = field()
_snake_case : BasicEnum = field()
def snake_case__ ( self : str ) -> Any:
'''simple docstring'''
_UpperCamelCase = BasicEnum(self.required_enum )
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
_snake_case : int
_snake_case : "BasicEnum" = field()
_snake_case : "Optional[bool]" = None
_snake_case : "str" = field(default='toto' , metadata={'help': 'help message'} )
_snake_case : "List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
_snake_case : bool = False
_snake_case : bool = True
_snake_case : bool | None = None
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
_snake_case : int | None = None
_snake_case : float | None = field(default=__magic_name__ , metadata={'help': 'help message'} )
_snake_case : str | None = None
_snake_case : list[str] | None = list_field(default=[] )
_snake_case : list[int] | None = list_field(default=[] )
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : argparse.ArgumentParser , lowerCAmelCase__ : argparse.ArgumentParser ) -> str:
'''simple docstring'''
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
_UpperCamelCase = {k: v for k, v in vars(lowerCAmelCase__ ).items() if k != '''container'''}
_UpperCamelCase = {k: v for k, v in vars(lowerCAmelCase__ ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' , lowerCAmelCase__ ) and yy.get('''choices''' , lowerCAmelCase__ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](lowerCAmelCase__ ) , yy['''type'''](lowerCAmelCase__ ) )
del xx["type"], yy["type"]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = HfArgumentParser(lowerCAmelCase__ )
_UpperCamelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ )
expected.add_argument('''--bar''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ )
expected.add_argument('''--baz''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ )
expected.add_argument('''--flag''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , const=lowerCAmelCase__ , nargs='''?''' )
self.argparsersEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((_UpperCamelCase) , ) = parser.parse_args_into_dataclasses(lowerCAmelCase__ , look_for_args_file=lowerCAmelCase__ )
self.assertFalse(example.flag )
def snake_case__ ( self : int ) -> Any:
'''simple docstring'''
_UpperCamelCase = HfArgumentParser(lowerCAmelCase__ )
_UpperCamelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=42 , type=lowerCAmelCase__ )
expected.add_argument('''--baz''' , default='''toto''' , type=lowerCAmelCase__ , help='''help message''' )
self.argparsersEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , const=lowerCAmelCase__ , nargs='''?''' )
expected.add_argument('''--baz''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , const=lowerCAmelCase__ , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=lowerCAmelCase__ , dest='''baz''' )
expected.add_argument('''--opt''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ )
_UpperCamelCase = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowerCAmelCase__ )
for dataclass_type in dataclass_types:
_UpperCamelCase = HfArgumentParser(lowerCAmelCase__ )
self.argparsersEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = parser.parse_args([] )
self.assertEqual(lowerCAmelCase__ , Namespace(foo=lowerCAmelCase__ , baz=lowerCAmelCase__ , opt=lowerCAmelCase__ ) )
_UpperCamelCase = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(lowerCAmelCase__ , Namespace(foo=lowerCAmelCase__ , baz=lowerCAmelCase__ , opt=lowerCAmelCase__ ) )
_UpperCamelCase = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(lowerCAmelCase__ , Namespace(foo=lowerCAmelCase__ , baz=lowerCAmelCase__ , opt=lowerCAmelCase__ ) )
_UpperCamelCase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(lowerCAmelCase__ , Namespace(foo=lowerCAmelCase__ , baz=lowerCAmelCase__ , opt=lowerCAmelCase__ ) )
_UpperCamelCase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(lowerCAmelCase__ , Namespace(foo=lowerCAmelCase__ , baz=lowerCAmelCase__ , opt=lowerCAmelCase__ ) )
def snake_case__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = HfArgumentParser(lowerCAmelCase__ )
_UpperCamelCase = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
_UpperCamelCase = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
_UpperCamelCase = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
_UpperCamelCase = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
_UpperCamelCase = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
_UpperCamelCase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def snake_case__ ( self : Any ) -> List[Any]:
'''simple docstring'''
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
_snake_case : Literal["titi", "toto", 4_2] = "toto"
_UpperCamelCase = HfArgumentParser(lowerCAmelCase__ )
_UpperCamelCase = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
_UpperCamelCase = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
_UpperCamelCase = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
def snake_case__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = HfArgumentParser(lowerCAmelCase__ )
_UpperCamelCase = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=lowerCAmelCase__ )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=lowerCAmelCase__ )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=lowerCAmelCase__ )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=lowerCAmelCase__ )
self.argparsersEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = parser.parse_args([] )
self.assertEqual(
lowerCAmelCase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
_UpperCamelCase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(lowerCAmelCase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def snake_case__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ )
expected.add_argument('''--bar''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ , help='''help message''' )
expected.add_argument('''--baz''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=lowerCAmelCase__ )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=lowerCAmelCase__ )
_UpperCamelCase = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowerCAmelCase__ )
for dataclass_type in dataclass_types:
_UpperCamelCase = HfArgumentParser(lowerCAmelCase__ )
self.argparsersEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = parser.parse_args([] )
self.assertEqual(lowerCAmelCase__ , Namespace(foo=lowerCAmelCase__ , bar=lowerCAmelCase__ , baz=lowerCAmelCase__ , ces=[] , des=[] ) )
_UpperCamelCase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(lowerCAmelCase__ , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def snake_case__ ( self : Any ) -> int:
'''simple docstring'''
_UpperCamelCase = HfArgumentParser(lowerCAmelCase__ )
_UpperCamelCase = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ )
expected.add_argument('''--required_str''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=lowerCAmelCase__ , )
self.argparsersEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : Any ) -> int:
'''simple docstring'''
_UpperCamelCase = HfArgumentParser(lowerCAmelCase__ )
_UpperCamelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=lowerCAmelCase__ , )
expected.add_argument('''--opt''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ )
expected.add_argument('''--baz''' , default='''toto''' , type=lowerCAmelCase__ , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=lowerCAmelCase__ )
self.argparsersEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = HfArgumentParser(lowerCAmelCase__ )
_UpperCamelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
_UpperCamelCase = parser.parse_dict(lowerCAmelCase__ )[0]
_UpperCamelCase = BasicExample(**lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : Any ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = HfArgumentParser(lowerCAmelCase__ )
_UpperCamelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 42,
}
self.assertRaises(lowerCAmelCase__ , parser.parse_dict , lowerCAmelCase__ , allow_extra_keys=lowerCAmelCase__ )
def snake_case__ ( self : str ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = HfArgumentParser(lowerCAmelCase__ )
_UpperCamelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCamelCase = os.path.join(lowerCAmelCase__ , '''temp_json''' )
os.mkdir(lowerCAmelCase__ )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0]
_UpperCamelCase = BasicExample(**lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = HfArgumentParser(lowerCAmelCase__ )
_UpperCamelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCamelCase = os.path.join(lowerCAmelCase__ , '''temp_yaml''' )
os.mkdir(lowerCAmelCase__ )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
_UpperCamelCase = BasicExample(**lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : str ) -> str:
'''simple docstring'''
_UpperCamelCase = HfArgumentParser(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
| 287 | 0 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( __a ):
__a : int = ["""image_processor""", """tokenizer"""]
__a : Union[str, Any] = """ChineseCLIPImageProcessor"""
__a : List[Any] = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : Dict , lowercase : Union[str, Any]=None , lowercase : Dict=None , **lowercase : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowercase , )
UpperCAmelCase = kwargs.pop('''feature_extractor''' )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowercase , lowercase )
UpperCAmelCase = self.image_processor
    def __call__( self : Tuple , text : Optional[Any]=None , images : Union[str, Any]=None , return_tensors : int=None , **kwargs : Dict ):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self : int , *args : Tuple , **kwargs : List[str] ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self : Optional[Any] , *args : int , **kwargs : Optional[int] ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
@property
    def model_input_names( self : Optional[int] ):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
    def feature_extractor_class( self : List[Any] ):
        '''simple docstring'''
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
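    # Usage sketch for the class above (ChineseCLIPProcessor in the original
    # library; checkpoint name and inputs are illustrative assumptions):
    #
    #     processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
    #     inputs = processor(text=["一张猫的照片"], images=image, return_tensors="pt")
    #     # -> input_ids / attention_mask from the tokenizer plus pixel_values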
| 34 |
"""simple docstring"""
def fibonacci ( __snake_case : int ):
    if __snake_case == 1 or not isinstance(__snake_case , int ):
        return 0
    elif __snake_case == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , __snake_case + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[__snake_case]
def fibonacci_digits_index ( __snake_case : int ):
    digits = 0
    index = 2
    while digits < __snake_case:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
def solution ( __snake_case : int = 1_0_0_0 ):
    return fibonacci_digits_index(__snake_case )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
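# Quick check added for illustration: F(12) = 144 is the first Fibonacci number
# with three digits, so fibonacci_digits_index(3) should return 12.
assert fibonacci(12) == 144
assert fibonacci_digits_index(3) == 12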
| 33 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowercase : Optional[Any] = 16
lowercase : str = 32
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 16 , SCREAMING_SNAKE_CASE__ = "bert-base-cased" ) -> List[str]:
lowercase : List[str] = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
lowercase : List[Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(SCREAMING_SNAKE_CASE__ ):
# max_length=None => use the model max length (it's actually the default)
lowercase : Optional[Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase : List[str] = datasets.map(
UpperCAmelCase__ , batched=UpperCAmelCase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=UpperCAmelCase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase : Union[str, Any] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(SCREAMING_SNAKE_CASE__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCAmelCase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(UpperCAmelCase__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
lowercase : Any = DataLoader(
tokenized_datasets["""train"""] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ )
lowercase : int = DataLoader(
tokenized_datasets["""validation"""] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ )
return train_dataloader, eval_dataloader
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
# Initialize accelerator
lowercase : Union[str, Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase : Any = config["""lr"""]
lowercase : str = int(config["""num_epochs"""] )
lowercase : Tuple = int(config["""seed"""] )
lowercase : Optional[Any] = int(config["""batch_size"""] )
lowercase : List[Any] = args.model_name_or_path
set_seed(UpperCAmelCase__ )
lowercase : Dict = get_dataloaders(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase : int = AutoModelForSequenceClassification.from_pretrained(UpperCAmelCase__ , return_dict=UpperCAmelCase__ )
# Instantiate optimizer
lowercase : Dict = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase : List[Any] = optimizer_cls(params=model.parameters() , lr=UpperCAmelCase__ )
if accelerator.state.deepspeed_plugin is not None:
lowercase : Optional[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
lowercase : int = 1
lowercase : Union[str, Any] = (len(UpperCAmelCase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase : List[str] = get_linear_schedule_with_warmup(
optimizer=UpperCAmelCase__ , num_warmup_steps=0 , num_training_steps=UpperCAmelCase__ , )
else:
lowercase : Optional[int] = DummyScheduler(UpperCAmelCase__ , total_num_steps=UpperCAmelCase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase : List[str] = accelerator.prepare(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# We need to keep track of how many total steps we have iterated over
lowercase : str = 0
# We also need to keep track of the stating epoch so files are named properly
lowercase : str = 0
# Now we train the model
lowercase : int = evaluate.load("""glue""" , """mrpc""" )
lowercase : Tuple = 0
lowercase : Tuple = {}
for epoch in range(UpperCAmelCase__ , UpperCAmelCase__ ):
model.train()
for step, batch in enumerate(UpperCAmelCase__ ):
lowercase : Optional[Any] = model(**UpperCAmelCase__ )
lowercase : int = outputs.loss
lowercase : List[str] = loss / gradient_accumulation_steps
accelerator.backward(UpperCAmelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
lowercase : Tuple = 0
for step, batch in enumerate(UpperCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase : Tuple = model(**UpperCAmelCase__ )
lowercase : int = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowercase : Dict = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(UpperCAmelCase__ ) - 1:
lowercase : List[str] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowercase : List[str] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=UpperCAmelCase__ , references=UpperCAmelCase__ , )
lowercase : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:" , UpperCAmelCase__ )
lowercase : Tuple = eval_metric["""accuracy"""]
if best_performance < eval_metric["accuracy"]:
lowercase : List[Any] = eval_metric["""accuracy"""]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , """all_results.json""" ) , """w""" ) as f:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
def _snake_case( ) -> int:
lowercase : int = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=UpperCAmelCase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=UpperCAmelCase__ , )
parser.add_argument(
"""--output_dir""" , type=UpperCAmelCase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--performance_lower_bound""" , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" , )
parser.add_argument(
"""--num_epochs""" , type=UpperCAmelCase__ , default=3 , help="""Number of train epochs.""" , )
lowercase : Union[str, Any] = parser.parse_args()
lowercase : int = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(UpperCAmelCase__ , UpperCAmelCase__ )
if __name__ == "__main__":
main()
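# Note added for illustration: the optimizer/scheduler branches above implement
# Accelerate's DeepSpeed pattern -- when the DeepSpeed config file already
# defines an optimizer or scheduler, DummyOptim/DummyScheduler are instantiated
# instead so that DeepSpeed can build the real ones from its own config.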
| 371 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( lowerCAmelCase , unittest.TestCase ):
_a : Optional[int]= None
_a : Optional[Any]= BloomTokenizerFast
_a : Tuple= BloomTokenizerFast
_a : str= True
_a : Optional[int]= False
_a : List[Any]= "tokenizer_file"
_a : List[Any]= {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("""bigscience/tokenizer""" )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_rust_tokenizer( self ,**kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return BloomTokenizerFast.from_pretrained(self.tmpdirname ,**kwargs )
    def test_encodings_from_sample_data( self ):
        '''simple docstring'''
        tokenizer = self.get_rust_tokenizer()
        INPUT_SENTENCES = ["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES )["""input_ids"""]
        self.assertListEqual(TARGET_TOKENS ,computed_tokens )
        decoded_tokens = tokenizer.batch_decode(computed_tokens )
        self.assertListEqual(decoded_tokens ,INPUT_SENTENCES )
    def test_padding( self ,max_length=6 ):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name ,**kwargs )
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = """This is a simple input"""
                sa = ["""This is a simple input 1""", """This is a simple input 2"""]
                p = ("""This is a simple input""", """This is a pair""")
                pa = [
                    ("""This is a simple input 1""", """This is a simple input 2"""),
                    ("""This is a simple pair 1""", """This is a simple pair 2"""),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(s ,max_length=max_length )
                    tokenizer_r.encode_plus(s ,max_length=max_length )
                    tokenizer_r.batch_encode_plus(sa ,max_length=max_length )
                    tokenizer_r.encode(p ,max_length=max_length )
                    tokenizer_r.batch_encode_plus(pa ,max_length=max_length )
                except ValueError:
                    self.fail("""Bloom Tokenizer should be able to deal with padding""" )
                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError ,tokenizer_r.encode ,s ,max_length=max_length ,padding="""max_length""" )
                # Simple input
                self.assertRaises(ValueError ,tokenizer_r.encode_plus ,s ,max_length=max_length ,padding="""max_length""" )
                # Simple input
                self.assertRaises(
                    ValueError ,tokenizer_r.batch_encode_plus ,sa ,max_length=max_length ,padding="""max_length""" ,)
                # Pair input
                self.assertRaises(ValueError ,tokenizer_r.encode ,p ,max_length=max_length ,padding="""max_length""" )
                # Pair input
                self.assertRaises(ValueError ,tokenizer_r.encode_plus ,p ,max_length=max_length ,padding="""max_length""" )
                # Pair input
                self.assertRaises(
                    ValueError ,tokenizer_r.batch_encode_plus ,pa ,max_length=max_length ,padding="""max_length""" ,)
    def test_encodings_from_xnli_dataset( self ):
        '''simple docstring'''
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("""xnli""" ,"""all_languages""" ,split="""test""" ,streaming=True )
        sample_data = next(iter(ds ) )["""premise"""]  # pick up one data
        input_text = list(sample_data.values() )
        output_tokens = list(map(tokenizer.encode ,input_text ) )
        predicted_text = [tokenizer.decode(x ,clean_up_tokenization_spaces=False ) for x in output_tokens]
        self.assertListEqual(predicted_text ,input_text )
    def test_pretrained_model_lists( self ):
'''simple docstring'''
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) ,1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) ,1 )
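    # Usage sketch (checkpoint as used in setUp above):
    #
    #     tok = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
    #     ids = tok("The quick brown fox</s>")["input_ids"]
    #     assert tok.decode(ids) == "The quick brown fox</s>"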
| 285 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class lowercase_ ( unittest.TestCase ):
    def test_input_types( self ):
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset )
        self.assertTrue(isinstance(dc.token_ids , list ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
    def test_check_illegal_input( self ):
# We can't have constraints that are complete subsets of another. This leads to a preverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(cset )  # fails here
    def test_example_progression( self ):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset )
        stepped , completed , reset = dc.update(1 )
        desired_result = stepped is True and completed is False and reset is False
        self.assertTrue(desired_result )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        desired_result = stepped is True and completed is False and reset is False
        self.assertTrue(desired_result )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(3 )
        desired_result = stepped is True and completed is True and reset is False
        self.assertTrue(desired_result )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )
    def test_example_progression_unequal_three_mid_and_reset( self ):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset )
        stepped , completed , reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        stepped , completed , reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        dc.reset()
        stepped , completed , reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
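    # Usage sketch: a DisjunctiveConstraint is normally handed to `generate`
    # for constrained beam search; model and inputs are illustrative:
    #
    #     constraint = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    #     out = model.generate(input_ids, constraints=[constraint], num_beams=4)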
| 80 | """simple docstring"""
import copy
import re
class TrialShortNamer :
    PREFIX = """hp"""
    DEFAULTS = {}
    NAMING_INFO = None
    @classmethod
    def set_defaults ( cls , prefix , defaults ):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()
    @staticmethod
    def shortname_for_word ( info , word ):
        if len(word ) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word ):
            raise Exception(F"Parameters should not contain numbers: '{word}' contains a number" )
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1 , len(word ) + 1 ):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break
        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer ):
                s = ""
                while integer != 0:
                    s = chr(ord("A" ) + integer % 10 ) + s
                    integer //= 10
                return s
            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i )
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break
        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word
    @staticmethod
    def shortname_for_key ( info , param_name ):
        words = param_name.split("_" )
        shortname_parts = [TrialShortNamer.shortname_for_word(info , word ) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts )
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name
    @staticmethod
    def add_new_param_name ( info , param_name ):
        short_name = TrialShortNamer.shortname_for_key(info , param_name )
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name
    @classmethod
    def build_naming_info ( cls ):
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys() )
        for k in field_keys:
            cls.add_new_param_name(info , k )
        cls.NAMING_INFO = info
    @classmethod
    def shortname ( cls , params ):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX )]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(F"You should provide a default value for the param name {k} with value {v}" )
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v , bool ):
                v = 1 if v else 0
            sep = "" if isinstance(v , (int, float) ) else "-"
            e = F"{key}{sep}{v}"
            name.append(e )
        return "_".join(name )
    @classmethod
    def parse_repr ( cls , repr ):
        repr = repr[len(cls.PREFIX ) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_" )
        parameters = {}
        for value in values:
            if "-" in value:
                p_k , p_v = value.split("-" )
            else:
                p_k = re.sub("[0-9.]" , "" , value )
                p_v = float(re.sub("[^0-9.]" , "" , value ) )
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
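    # Usage sketch (values illustrative): subclass with a PREFIX and DEFAULTS,
    # then shortname() encodes only the non-default params and parse_repr()
    # inverts it:
    #
    #     class MyNamer(TrialShortNamer):
    #         PREFIX = "hp"
    #         DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32}
    #
    #     MyNamer.shortname({"learning_rate": 1e-3, "batch_size": 64})  # -> "hp_bs64"
    #     MyNamer.parse_repr("hp_bs64")  # -> {"learning_rate": 0.001, "batch_size": 64.0}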
| 261 | 0 |
'''simple docstring'''
import math
import unittest
def is_prime ( number: int ) -> bool:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All remaining prime candidates are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class a__ ( unittest.TestCase ):
    def test_primes( self : List[Any] ):
"""simple docstring"""
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
    def test_not_primes( self : Any ):
"""simple docstring"""
        with self.assertRaises(AssertionError ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , '''Zero doesn\'t have any positive factors, primes must have exactly two.''' , )
self.assertFalse(
is_prime(1 ) , '''One only has 1 positive factor, primes must have exactly two.''' , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
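# Why the 6k +/- 1 step works (note added for illustration): every integer is
# congruent to 0..5 mod 6, and everything other than 6k - 1 and 6k + 1 is
# divisible by 2 or 3, so after the early checks it suffices to trial-divide by
# i = 5, 11, 17, ... and i + 2 = 7, 13, 19, ...
#
#     >>> [n for n in range(5, 50) if is_prime(n)]
#     [5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]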
| 237 | '''simple docstring'''
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
__UpperCAmelCase ="▁"
__UpperCAmelCase ={"vocab_file": "prophetnet.tokenizer"}
__UpperCAmelCase ={
"vocab_file": {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
),
}
}
__UpperCAmelCase ={
"microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}
__UpperCAmelCase ={
"microsoft/xprophetnet-large-wiki100-cased": 5_1_2,
}
def __lowerCAmelCase ( UpperCamelCase__ ) -> List[str]:
    vocab = collections.OrderedDict()
    with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip('''\n''' )
        vocab[token] = index
return vocab
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : Optional[Any] =VOCAB_FILES_NAMES
lowerCamelCase : Any =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Union[str, Any] =["input_ids", "attention_mask"]
def __init__( self : int , a : List[str] , a : Optional[int]="[SEP]" , a : int="[SEP]" , a : str="[SEP]" , a : List[Any]="[UNK]" , a : List[Any]="[PAD]" , a : str="[CLS]" , a : List[str]="[MASK]" , a : Optional[Dict[str, Any]] = None , **a : str , ):
"""simple docstring"""
__lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a , eos_token=a , sep_token=a , unk_token=a , pad_token=a , cls_token=a , mask_token=a , sp_model_kwargs=self.sp_model_kwargs , **a , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'''
''' pip install sentencepiece''' )
raise
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(a ) )
__lowerCamelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
__lowerCamelCase = {'''[PAD]''': 0, '''[CLS]''': 1, '''[SEP]''': 2, '''[UNK]''': 3, '''[MASK]''': 4}
for i in range(10 ):
__lowerCamelCase = f"""[unused{i}]"""
__lowerCamelCase = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
__lowerCamelCase = 12
__lowerCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(a )
def __getstate__( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = self.__dict__.copy()
__lowerCamelCase = None
return state
def __setstate__( self : int , a : List[Any] ):
"""simple docstring"""
__lowerCamelCase = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'''
''' pip install sentencepiece''' )
raise
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__lowerCamelCase = {}
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE__ ( self : str , a : List[int] , a : Optional[List[int]] = None , a : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
if token_ids_a is None:
return ([0] * len(a )) + [1]
return ([0] * len(a )) + [1] + ([0] * len(a )) + [1]
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : List[int] , a : Optional[List[int]] = None ):
"""simple docstring"""
__lowerCamelCase = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = {self.convert_ids_to_tokens(a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE__ ( self : Tuple , a : str ):
"""simple docstring"""
return self.sp_model.encode(a , out_type=a )
def SCREAMING_SNAKE_CASE__ ( self : Dict , a : int ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__lowerCamelCase = self.sp_model.PieceToId(a )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , a : Union[str, Any] ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : Tuple ):
"""simple docstring"""
        out_string = ''''''.join(a ).replace('''▁''' , ''' ''' ).strip()
return out_string
def SCREAMING_SNAKE_CASE__ ( self : int , a : str , a : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCamelCase = os.path.join(
a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a )
elif not os.path.isfile(self.vocab_file ):
with open(a , '''wb''' ) as fi:
__lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(a )
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE__ ( self : Any , a : List[int] , a : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
__lowerCamelCase = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
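    # Usage sketch (this class is XLMProphetNetTokenizer in the original
    # library; note the fairseq offset of 12 applied to SentencePiece ids above):
    #
    #     tok = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
    #     ids = tok("Hello world").input_ids  # token ids followed by the [SEP] id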
| 237 | 1 |
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : int ="align_text_model"
def __init__( self , snake_case__=30_522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3_072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=0 , snake_case__="absolute" , snake_case__=True , **snake_case__ , ):
"""simple docstring"""
super().__init__(**snake_case__ )
lowerCAmelCase : str = vocab_size
lowerCAmelCase : Optional[int] = hidden_size
lowerCAmelCase : str = num_hidden_layers
lowerCAmelCase : int = num_attention_heads
lowerCAmelCase : Dict = hidden_act
lowerCAmelCase : Any = intermediate_size
lowerCAmelCase : Optional[Any] = hidden_dropout_prob
lowerCAmelCase : Any = attention_probs_dropout_prob
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : Tuple = type_vocab_size
lowerCAmelCase : int = initializer_range
lowerCAmelCase : Optional[int] = layer_norm_eps
lowerCAmelCase : List[Any] = position_embedding_type
lowerCAmelCase : Tuple = use_cache
lowerCAmelCase : Union[str, Any] = pad_token_id
@classmethod
def lowercase__ ( cls , snake_case__ , **snake_case__ ):
"""simple docstring"""
cls._set_token_in_kwargs(snake_case__ )
lowerCAmelCase , lowerCAmelCase : Dict = cls.get_config_dict(snake_case__ , **snake_case__ )
# get the text config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
lowerCAmelCase : List[Any] = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case__ , **snake_case__ )
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : int ="align_vision_model"
def __init__( self , snake_case__ = 3 , snake_case__ = 600 , snake_case__ = 2.0 , snake_case__ = 3.1 , snake_case__ = 8 , snake_case__ = [3, 3, 5, 3, 5, 5, 3] , snake_case__ = [32, 16, 24, 40, 80, 112, 192] , snake_case__ = [16, 24, 40, 80, 112, 192, 320] , snake_case__ = [] , snake_case__ = [1, 2, 2, 2, 1, 2, 1] , snake_case__ = [1, 2, 2, 3, 3, 4, 1] , snake_case__ = [1, 6, 6, 6, 6, 6, 6] , snake_case__ = 0.25 , snake_case__ = "swish" , snake_case__ = 2_560 , snake_case__ = "mean" , snake_case__ = 0.02 , snake_case__ = 0.001 , snake_case__ = 0.99 , snake_case__ = 0.2 , **snake_case__ , ):
"""simple docstring"""
super().__init__(**snake_case__ )
lowerCAmelCase : Optional[int] = num_channels
lowerCAmelCase : str = image_size
lowerCAmelCase : Optional[Any] = width_coefficient
lowerCAmelCase : Optional[int] = depth_coefficient
lowerCAmelCase : Tuple = depth_divisor
lowerCAmelCase : List[Any] = kernel_sizes
lowerCAmelCase : int = in_channels
lowerCAmelCase : Tuple = out_channels
lowerCAmelCase : Any = depthwise_padding
lowerCAmelCase : Optional[int] = strides
lowerCAmelCase : Union[str, Any] = num_block_repeats
lowerCAmelCase : Dict = expand_ratios
lowerCAmelCase : Optional[int] = squeeze_expansion_ratio
lowerCAmelCase : Optional[Any] = hidden_act
lowerCAmelCase : str = hidden_dim
lowerCAmelCase : Union[str, Any] = pooling_type
lowerCAmelCase : int = initializer_range
lowerCAmelCase : Dict = batch_norm_eps
lowerCAmelCase : str = batch_norm_momentum
lowerCAmelCase : List[Any] = drop_connect_rate
lowerCAmelCase : Dict = sum(snake_case__ ) * 4
@classmethod
def lowercase__ ( cls , snake_case__ , **snake_case__ ):
"""simple docstring"""
cls._set_token_in_kwargs(snake_case__ )
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = cls.get_config_dict(snake_case__ , **snake_case__ )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
lowerCAmelCase : Tuple = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case__ , **snake_case__ )
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Tuple ="align"
a : Dict =True
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=640 , snake_case__=1.0 , snake_case__=0.02 , **snake_case__ , ):
"""simple docstring"""
super().__init__(**snake_case__ )
if text_config is None:
lowerCAmelCase : List[Any] = {}
logger.info("text_config is None. Initializing the AlignTextConfig with default values." )
if vision_config is None:
lowerCAmelCase : Union[str, Any] = {}
logger.info("vision_config is None. Initializing the AlignVisionConfig with default values." )
lowerCAmelCase : Optional[int] = AlignTextConfig(**snake_case__ )
lowerCAmelCase : str = AlignVisionConfig(**snake_case__ )
lowerCAmelCase : List[str] = projection_dim
lowerCAmelCase : Union[str, Any] = temperature_init_value
lowerCAmelCase : Union[str, Any] = initializer_range
@classmethod
def lowercase__ ( cls , snake_case__ , snake_case__ , **snake_case__ ):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
lowerCAmelCase : Tuple = self.text_config.to_dict()
lowerCAmelCase : Union[str, Any] = self.vision_config.to_dict()
lowerCAmelCase : Dict = self.__class__.model_type
return output
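    # Composition sketch (original class names AlignTextConfig / AlignVisionConfig /
    # AlignConfig; values illustrative):
    #
    #     text_cfg = AlignTextConfig(vocab_size=30_522)
    #     vision_cfg = AlignVisionConfig(image_size=600)
    #     cfg = AlignConfig.from_text_vision_configs(text_cfg, vision_cfg)
    #     cfg.to_dict()["text_config"]["vocab_size"]  # -> 30522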
| 108 |
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
a : int =MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Tuple = hf_hub_download(
repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
lowerCAmelCase : Dict = VideoClassificationPipeline(model=snake_case__ , image_processor=snake_case__ , top_k=2 )
lowerCAmelCase : Any = [
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
for example in examples:
lowerCAmelCase : str = video_classifier(snake_case__ )
self.assertEqual(
snake_case__ , [
{"score": ANY(snake_case__ ), "label": ANY(snake_case__ )},
{"score": ANY(snake_case__ ), "label": ANY(snake_case__ )},
] , )
@require_torch
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
lowerCAmelCase : str = VideoMAEFeatureExtractor(
size={"shortest_edge": 10} , crop_size={"height": 10, "width": 10} )
lowerCAmelCase : int = pipeline(
"video-classification" , model=snake_case__ , feature_extractor=snake_case__ , frame_sampling_rate=4 )
lowerCAmelCase : Optional[int] = hf_hub_download(repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
lowerCAmelCase : Union[str, Any] = video_classifier(snake_case__ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}] , )
lowerCAmelCase : Tuple = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
[{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
[{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
] , )
@require_tf
def lowercase__ ( self ):
"""simple docstring"""
pass
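    # Usage sketch outside the test harness (tiny test checkpoint as above;
    # output shape is illustrative):
    #
    #     clf = pipeline("video-classification",
    #                    model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification")
    #     clf("archery.mp4", top_k=2)  # -> [{"score": ..., "label": ...}, ...]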
| 108 | 1 |
'''simple docstring'''
from datetime import datetime
import requests
def snake_case_ ( url: str )-> bytes:
    '''simple docstring'''
    base_url = """https://downloadgram.net/wp-json/wppress/video-downloader/video?url="""
    video_url = requests.get(base_url + url ).json()[0]["""urls"""][0]["""src"""]
    return requests.get(video_url ).content
if __name__ == "__main__":
    url = input("""Enter Video/IGTV url: """).strip()
    file_name = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
    with open(file_name, """wb""") as fp:
        fp.write(snake_case_(url))
print(f"""Done. Video saved to disk as {file_name}.""")
| 349 |
'''simple docstring'''
def snake_case_ ( lowerCAmelCase_ )-> int:
    '''simple docstring'''
    if not isinstance(lowerCAmelCase_ , int ):
        raise TypeError("""only integers accepted as input""" )
    else:
        num_string = str(abs(lowerCAmelCase_ ) )
        num_transpositions = [list(num_string ) for _ in range(len(num_string ) )]
        for index in range(len(num_string ) ):
            num_transpositions[index].pop(index )
        return max(
            int("""""".join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 349 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = '''ctrl'''
_snake_case : Tuple = ['''past_key_values''']
_snake_case : Tuple = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , _UpperCamelCase=2_4_6_5_3_4 , _UpperCamelCase=2_5_6 , _UpperCamelCase=1_2_8_0 , _UpperCamelCase=8_1_9_2 , _UpperCamelCase=4_8 , _UpperCamelCase=1_6 , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=1E-6 , _UpperCamelCase=0.02 , _UpperCamelCase=True , **_UpperCamelCase , ) -> Union[str, Any]:
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : Dict = n_positions
UpperCAmelCase_ : List[str] = n_embd
UpperCAmelCase_ : Optional[Any] = n_layer
UpperCAmelCase_ : List[Any] = n_head
UpperCAmelCase_ : Any = dff
UpperCAmelCase_ : Union[str, Any] = resid_pdrop
UpperCAmelCase_ : Any = embd_pdrop
UpperCAmelCase_ : int = layer_norm_epsilon
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : Any = use_cache
super().__init__(**_UpperCamelCase )
| 29 |
import torch
from transformers import AutoModel
class __lowerCamelCase (torch.nn.Module ):
def __init__( self: Union[str, Any],A_: Tuple="sayef/fsner-bert-base-uncased" ):
'''simple docstring'''
        super().__init__()
        self.bert = AutoModel.from_pretrained(A_,return_dict=True )
        self.cos = torch.nn.CosineSimilarity(3,1E-08 )
        self.softmax = torch.nn.Softmax(dim=1 )
    def BERT( self: Tuple,**A_: Union[str, Any] ):
'''simple docstring'''
return self.bert(**A_ ).last_hidden_state
    def VectorSum( self: Union[str, Any],A_: Union[str, Any] ):
        '''simple docstring'''
        return A_.sum(2,keepdim=True )
    def Atten( self: List[str],q_rep: Dict,S_rep: Union[str, Any],T: Union[str, Any]=1 ):
        '''simple docstring'''
        return self.softmax(T * self.cos(q_rep,S_rep ) )
    def forward( self: Optional[int],W_query: Union[str, Any],W_supports: Union[str, Any] ):
        '''simple docstring'''
        support_sizes = W_supports['sizes'].tolist()
        start_token_id = W_supports['start_token_id'].item()
        end_token_id = W_supports['end_token_id'].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query )
        S = self.BERT(**W_supports )
        p_starts = None
        p_ends = None
        start_token_masks = W_supports['input_ids'] == start_token_id
        end_token_masks = W_supports['input_ids'] == end_token_id
        for i, size in enumerate(support_sizes ):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i],s_start.T ).sum(1 ).softmax(0 )
            p_end = torch.matmul(q[i],s_end.T ).sum(1 ).softmax(0 )
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start) )
                p_ends = torch.vstack((p_ends, p_end) )
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
| 310 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class lowerCamelCase ( A_ ):
UpperCAmelCase__ : Dict = "wav2vec2"
def __init__(self : List[Any] , _A : List[str]=3_2 , _A : Dict=7_6_8 , _A : Optional[int]=1_2 , _A : Any=1_2 , _A : Tuple=3_0_7_2 , _A : Tuple="gelu" , _A : Union[str, Any]=0.1 , _A : int=0.1 , _A : Any=0.1 , _A : Any=0.0 , _A : List[Any]=0.0 , _A : Optional[int]=0.1 , _A : Optional[Any]=0.1 , _A : Optional[Any]=0.02 , _A : Tuple=1E-5 , _A : Optional[Any]="group" , _A : int="gelu" , _A : List[Any]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _A : List[Any]=(5, 2, 2, 2, 2, 2, 2) , _A : str=(1_0, 3, 3, 3, 3, 2, 2) , _A : Optional[Any]=False , _A : Optional[Any]=1_2_8 , _A : Any=1_6 , _A : List[Any]=False , _A : List[Any]=True , _A : List[str]=0.05 , _A : List[str]=1_0 , _A : Optional[Any]=2 , _A : int=0.0 , _A : Dict=1_0 , _A : Dict=0 , _A : int=3_2_0 , _A : List[Any]=2 , _A : List[Any]=0.1 , _A : Any=1_0_0 , _A : Any=2_5_6 , _A : Dict=2_5_6 , _A : Tuple=0.1 , _A : str="sum" , _A : List[Any]=False , _A : List[Any]=False , _A : Any=2_5_6 , _A : Any=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , _A : List[Any]=(5, 3, 3, 1, 1) , _A : Tuple=(1, 2, 3, 1, 1) , _A : int=5_1_2 , _A : Dict=0 , _A : str=1 , _A : Optional[Any]=2 , _A : List[Any]=False , _A : List[str]=3 , _A : str=2 , _A : Optional[int]=3 , _A : Union[str, Any]=None , _A : Dict=None , **_A : Optional[int] , ) -> int:
super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A )
snake_case = hidden_size
snake_case = feat_extract_norm
snake_case = feat_extract_activation
snake_case = list(_A )
snake_case = list(_A )
snake_case = list(_A )
snake_case = conv_bias
snake_case = num_conv_pos_embeddings
snake_case = num_conv_pos_embedding_groups
snake_case = len(self.conv_dim )
snake_case = num_hidden_layers
snake_case = intermediate_size
snake_case = hidden_act
snake_case = num_attention_heads
snake_case = hidden_dropout
snake_case = attention_dropout
snake_case = activation_dropout
snake_case = feat_proj_dropout
snake_case = final_dropout
snake_case = layerdrop
snake_case = layer_norm_eps
snake_case = initializer_range
snake_case = vocab_size
snake_case = do_stable_layer_norm
snake_case = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
f' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case = apply_spec_augment
snake_case = mask_time_prob
snake_case = mask_time_length
snake_case = mask_time_min_masks
snake_case = mask_feature_prob
snake_case = mask_feature_length
snake_case = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
snake_case = num_codevectors_per_group
snake_case = num_codevector_groups
snake_case = contrastive_logits_temperature
snake_case = feat_quantizer_dropout
snake_case = num_negatives
snake_case = codevector_dim
snake_case = proj_codevector_dim
snake_case = diversity_loss_weight
# ctc loss
snake_case = ctc_loss_reduction
snake_case = ctc_zero_infinity
# adapter
snake_case = add_adapter
snake_case = adapter_kernel_size
snake_case = adapter_stride
snake_case = num_adapter_layers
snake_case = output_hidden_size or hidden_size
snake_case = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
snake_case = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
snake_case = list(_A )
snake_case = list(_A )
snake_case = list(_A )
snake_case = xvector_output_dim
@property
def UpperCAmelCase(self : Union[str, Any] ) -> str:
return functools.reduce(operator.mul , self.conv_stride , 1 )
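    # Note added for clarity: the property above multiplies the conv strides
    # together, i.e. the feature extractor's total downsampling factor; with the
    # default strides (5, 2, 2, 2, 2, 2, 2) that is 5 * 2**6 = 320 input samples
    # per output frame (`inputs_to_logits_ratio` in the original library).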
| 137 |
class SubArray :
    def __init__(self : List[Any] , _A : str ) -> Any:
        # we need a list not a string, so do something to change the type
        self.array = _A.split("," )
    def solve_sub_array(self : str ) -> str:
        sum_value = [int(self.array[0] )] * len(self.array )
        rear = [int(self.array[0] )] * len(self.array )
        for i in range(1 , len(self.array ) ):
            sum_value[i] = max(
                int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
            rear[i] = max(sum_value[i] , rear[i - 1] )
        return rear[len(self.array ) - 1]
if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array )
    re = array.solve_sub_array()
print(("the results is:", re))
| 137 | 1 |
"""simple docstring"""
import requests
giphy_api_key = 'YOUR API KEY'
def get_gifs ( query ,api_key = giphy_api_key ):
    """simple docstring"""
    formatted_query = """+""".join(query.split() )
    url = f'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'''
    gifs = requests.get(url ).json()["""data"""]
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("""\n""".join(get_gifs("""space ship""")))
| 289 |
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
def lowercase_ ( _snake_case ,_snake_case ,_snake_case = None ):
SCREAMING_SNAKE_CASE__ : Dict = tesseract_config if tesseract_config is not None else """"""
# apply OCR
SCREAMING_SNAKE_CASE__ : List[Any] = to_pil_image(_snake_case )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = pil_image.size
SCREAMING_SNAKE_CASE__ : Tuple = pytesseract.image_to_data(_snake_case ,lang=_snake_case ,output_type="""dict""" ,config=_snake_case )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
# filter empty words and corresponding coordinates
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [idx for idx, word in enumerate(_snake_case ) if not word.strip()]
SCREAMING_SNAKE_CASE__ : Dict = [word for idx, word in enumerate(_snake_case ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : List[str] = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : Tuple = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : int = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : Tuple = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
SCREAMING_SNAKE_CASE__ : List[Any] = []
for x, y, w, h in zip(_snake_case ,_snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = [x, y, x + w, y + h]
actual_boxes.append(_snake_case )
# finally, normalize the bounding boxes
SCREAMING_SNAKE_CASE__ : List[str] = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(_snake_case ,_snake_case ,_snake_case ) )
assert len(_snake_case ) == len(_snake_case ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = ['''pixel_values''']
def __init__(self , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "" , **SCREAMING_SNAKE_CASE__ , ) -> None:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else {"""height""": 2_24, """width""": 2_24}
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = do_resize
SCREAMING_SNAKE_CASE__ : Any = size
SCREAMING_SNAKE_CASE__ : List[Any] = resample
SCREAMING_SNAKE_CASE__ : Dict = apply_ocr
SCREAMING_SNAKE_CASE__ : List[str] = ocr_lang
SCREAMING_SNAKE_CASE__ : Tuple = tesseract_config
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = get_size_dict(SCREAMING_SNAKE_CASE__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE__ : Any = (size["""height"""], size["""width"""])
return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ , ) -> PIL.Image.Image:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : Optional[Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
SCREAMING_SNAKE_CASE__ : Optional[Any] = ocr_lang if ocr_lang is not None else self.ocr_lang
SCREAMING_SNAKE_CASE__ : Dict = tesseract_config if tesseract_config is not None else self.tesseract_config
SCREAMING_SNAKE_CASE__ : Optional[int] = make_list_of_images(SCREAMING_SNAKE_CASE__ )
if not valid_images(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images]
if apply_ocr:
requires_backends(self , """pytesseract""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
SCREAMING_SNAKE_CASE__ : Dict = []
for image in images:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = apply_tesseract(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
words_batch.append(SCREAMING_SNAKE_CASE__ )
boxes_batch.append(SCREAMING_SNAKE_CASE__ )
if do_resize:
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [flip_channel_order(SCREAMING_SNAKE_CASE__ ) for image in images]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images]
SCREAMING_SNAKE_CASE__ : Optional[Any] = BatchFeature(data={"""pixel_values""": images} , tensor_type=SCREAMING_SNAKE_CASE__ )
if apply_ocr:
SCREAMING_SNAKE_CASE__ : List[Any] = words_batch
SCREAMING_SNAKE_CASE__ : List[str] = boxes_batch
return data
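    # Usage sketch (this processor backs LayoutLMv2-style models in the original
    # library; with apply_ocr=True the batch also carries `words` and `boxes`
    # extracted by Tesseract):
    #
    #     ip = LayoutLMv2ImageProcessor(apply_ocr=True)
    #     out = ip(images=pil_image, return_tensors="pt")
    #     out.pixel_values.shape  # (1, 3, 224, 224); channels flipped to BGR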
| 25 | 0 |
"""simple docstring"""
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def _snake_case ( _snake_case : Optional[Any] ):
if isinstance(_snake_case , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class snake_case_:
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Dict ):
pass
def lowerCamelCase__ ( self : Union[str, Any] ):
pass
def lowerCamelCase__ ( self : List[Any] ):
pass
    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))
    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)
    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)
    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)
    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)
    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)
    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_1, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_1(**inputs)
        out_1 = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_1.save_pretrained(tmp_dirname)
            model_2 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_2(**inputs)
            out_2 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
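    # Arithmetic note (my addition): the patch count above follows the standard ViT layout,
    # e.g. image_size=224 with patch_size=16 gives (224 // 16) ** 2 = 196 patches, so the
    # attention sequence length becomes 197 once the [CLS] token is added.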
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0])
        )
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
| 314 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
return False
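# Quick sanity examples (my addition): the check is purely codepoint-range based, so CJK
# ideographs pass while Hiragana and Hangul -- which live in other Unicode blocks -- do not.
assert _is_chinese_char(ord("中"))
assert not _is_chinese_char(ord("あ"))  # Hiragana, U+3042
assert not _is_chinese_char(ord("A"))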
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
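# Illustrative behaviour (my addition): with the LTP-segmented word "身高" in the word set,
# the non-initial character of the match gets the "##" prefix that whole-word masking
# later relies on to group subwords back into one word.
assert add_sub_symbol(["身", "高", "180"], {"身高"}) == ["身", "##高", "180"]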
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
    args = parser.parse_args()
main(args)
| 314 | 1 |
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name
    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")
        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")
    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")
            new_name = "last_stage." + trimmed_name
    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")
    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name
    return new_name
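# A few mappings traced by hand through the branches above (my addition, illustrative):
#   "patch_embed.0.weight" -> "efficientformer.patch_embed.convolution1.weight"
#   "dist_head.weight"     -> "efficientformer.encoder.distillation_classifier.weight"
#   "norm.weight"          -> "efficientformer.layernorm.weight"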
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)
    model.load_state_dict(new_state_dict)
    model.eval()
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values
    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    assert torch.allclose(original_pixel_values, pixel_values)
    outputs = model(pixel_values)
    logits = outputs.logits
    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )
    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")
    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add model", use_temp_dir=True
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add image processor", use_temp_dir=True
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
) | 286 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
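# Shape sketch (my addition): each ROCStories example is encoded once per candidate ending,
# so the double-heads model sees a choice dimension of 2:
#   input_ids    (n_batch, 2, input_len)  [start] story [delim] ending [clf]
#   mc_token_ids (n_batch, 2)             position of the [clf] token in each sequence
#   lm_labels    (n_batch, 2, input_len)  same tokens, -100 where the LM loss is ignored
#   mc_labels    (n_batch,)               index (0 or 1) of the correct ending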
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_snake_case )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)
# Load and encode the datasets
    def tokenize_and_encode(obj):
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
logger.info('Encoding dataset...' )
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)
# Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Prepare optimizer
if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )
if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)
    # Load a trained model and vocabulary that you have fine-tuned
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
    model.to(device)
if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1
        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
| 218 | 0 |
def interpolation_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
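# Worked trace (my addition) for interpolation_search([10, 30, 40, 45, 50, 66, 77, 93], 45):
#   left=0, right=7: point = 0 + (45 - 10) * (7 - 0) // (93 - 10) = 2; A[2] = 40 < 45 -> left = 3
#   left=3, right=7: point = 3 + (45 - 45) * (7 - 3) // (93 - 45) = 3; A[3] = 45 -> return 3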
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1
            )
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right
            )
def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
if __name__ == "__main__":
import sys
    debug = 0
    if debug == 1:
        collection = [10, 30, 40, 45, 50, 66, 77, 93]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")
    target = 67
    result = interpolation_search(collection, target)
if result is not None:
print(F"""{target} found at positions: {result}""")
else:
print('''Not found''')
| 366 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
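# Why 6k +/- 1 (my addition): every integer is one of 6k, 6k+1, ..., 6k+5; 6k, 6k+2 and
# 6k+4 are even and 6k+3 is divisible by 3, so any prime > 3 must be 6k - 1 or 6k + 1.
# Testing i and i + 2 per step therefore covers all candidate divisors up to sqrt(number).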
def list_truncated_nums(n: int) -> list[int]:
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums
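# Example (my addition): 3797 stays prime when truncated from either side, and the helper
# enumerates exactly the numbers that must all be prime for it to qualify:
# list_truncated_nums(3797) == [3797, 797, 379, 97, 37, 7, 3]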
def validate(n: int) -> bool:
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True
def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes
def solution() -> int:
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
print(F"""{sum(compute_truncated_primes(1_1)) = }""")
| 47 | 0 |
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 34 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}
# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1114112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)
    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size
    def _tokenize(self, text: str) -> List[str]:
        return list(text)
    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")
    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")
    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result
    def save_vocabulary(self, save_directory, filename_prefix=None):
        return ()
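# Minimal usage sketch (my addition): CANINE has no learned vocabulary, so encoding is just
# per-character codepoints plus the private-use pseudo-codepoints defined above, e.g.
#   tokenizer = CanineTokenizer()
#   tokenizer("hi").input_ids  # -> [CLS, ord("h"), ord("i"), SEP] == [57344, 104, 105, 57345]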
| 285 | 0 |
"""simple docstring"""
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution(max_n: int = 100) -> int:
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
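# A minimal cross-check of the recurrence above (my addition; `e_convergent` is a
# hypothetical helper, not part of the original script). It rebuilds the n-th convergent
# of e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...] directly with exact fractions.
from fractions import Fraction

def e_convergent(n: int) -> Fraction:
    # partial quotients: a leading 2, then the repeating pattern 1, 2k, 1
    terms = [2] + [2 * (i // 3 + 1) if i % 3 == 1 else 1 for i in range(n - 1)]
    value = Fraction(terms[-1])
    for term in reversed(terms[:-1]):
        value = term + 1 / value
    return value

# Project Euler 65's worked example: the 10th convergent is 1457/536 and 1 + 4 + 5 + 7 = 17.
assert e_convergent(10).numerator == 1457
assert sum_digits(e_convergent(10).numerator) == 17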
if __name__ == "__main__":
print(f'''{solution() = }''')
| 181 |
"""simple docstring"""
import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab = (
'<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '
'ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '
'ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '
'oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '
'pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '
'yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '
'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '
'ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '
'ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '
'uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '
'ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '
'ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '
'ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'
).split(' ' )
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        # check adding a single token
        tokenizer.add_tokens("xxx")
        token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token
        tokenizer.add_tokens(["aaa", "bbb", "ccc"])
        token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa
        token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)
    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː")
    def test_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)
    def test_encode_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids)
        self.assertEqual(phonemes, phonemes_enc_dec)
    def test_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
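    # Note on the expected strings above (my addition): CTC-style decoding merges consecutive
    # duplicate ids first and drops pad tokens second, which is why id 15 survives twice in
    # sample_ids[0] -- the pad sitting between the two 15s breaks the merge.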
    def test_phonemize_with_word_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ | h aʊ | ɑːɹ | j uː |")
    def test_encode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)
    def test_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")
        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on
        # decode with word_del_token filter
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
        # decode with no word_del_token filter
        tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False)
        batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"])
    def test_encode_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False)
        self.assertEqual(phonemes, phonemes_enc_dec)
    def test_encode_decode_with_del_filter(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True)
        self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip(), phonemes_enc_dec)
    def test_change_phonemizer_lang(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=None
        )
        input_text = "Hello how are you"
        input_ids_en = tokenizer(input_text, phonemizer_lang="en-us").input_ids
        input_ids_fr = tokenizer(input_text, phonemizer_lang="fr-fr").input_ids
        self.assertNotEqual(input_ids_en, input_ids_fr)
        text_en = tokenizer.decode(input_ids_en)
        text_fr = tokenizer.decode(input_ids_fr)
        self.assertEqual(text_en, "h ə l oʊ h aʊ ɑːɹ j uː")
        self.assertEqual(text_fr, "ɛ l o h aʊ a ʁ j u")
def UpperCAmelCase__ ( self :int ) -> List[Any]:
UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
UpperCAmelCase = 'Hello how Are you'
UpperCAmelCase = 'hello how are you'
UpperCAmelCase = tokenizer(lowercase_ ).input_ids
UpperCAmelCase = tokenizer(lowercase_ ).input_ids
self.assertEqual(lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self :Optional[Any] ) -> int:
UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
tokenizer.add_tokens(['!', '?'] )
tokenizer.add_special_tokens({'cls_token': '$$$'} )
# fmt: off
UpperCAmelCase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94],
]
# fmt: on
UpperCAmelCase = tokenizer.batch_decode(lowercase_ )
self.assertEqual(lowercase_ , ['k s ɾ ɾ l ɭʲ!?!? $$$', 'j ð s j ð s oːɹ $$$'] )
@staticmethod
def get_from_offsets ( offsets , key ):
UpperCAmelCase = [d[key] for d in offsets]
return retrieved_list
def UpperCAmelCase__ ( self :str ) -> Optional[int]:
UpperCAmelCase = self.get_tokenizer(word_delimiter_token='|' )
tokenizer.add_tokens('|' )
# fmt: off
# ksssɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
UpperCAmelCase = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
UpperCAmelCase = tokenizer.decode(lowercase_ , output_char_offsets=lowercase_ , filter_word_delimiter_token=lowercase_ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('text' in outputs )
self.assertTrue('char_offsets' in outputs )
self.assertTrue(isinstance(lowercase_ , lowercase_ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(' '.join(self.get_from_offsets(outputs['char_offsets'] , 'char' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'char' ) , ['k', 's', 'ɾ', 'ɾ', '|', 'ɾ', 'l', '|', 'ɭʲ'] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'start_offset' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'end_offset' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def UpperCAmelCase__ ( self :Optional[int] ) -> Optional[int]:
UpperCAmelCase = self.get_tokenizer(word_delimiter_token='|' )
def check_list_tuples_equal(lowercase_ :List[Any] , lowercase_ :str ):
self.assertTrue(isinstance(lowercase_ , lowercase_ ) )
self.assertTrue(isinstance(outputs_list[0] , lowercase_ ) )
# transform list to ModelOutput
UpperCAmelCase = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch['text'] , outputs_batch_a['text'] )
def recursive_check(lowercase_ :Any , lowercase_ :str ):
if isinstance(lowercase_ , lowercase_ ):
[recursive_check(lowercase_ , lowercase_ ) for la, la in zip(lowercase_ , lowercase_ )]
self.assertEqual(lowercase_ , lowercase_ )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['char_offsets'] , outputs_batch_a['char_offsets'] )
# fmt: off
UpperCAmelCase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
UpperCAmelCase = tokenizer.batch_decode(lowercase_ , output_char_offsets=lowercase_ )
UpperCAmelCase = [tokenizer.decode(lowercase_ , output_char_offsets=lowercase_ ) for ids in sample_ids]
check_list_tuples_equal(lowercase_ , lowercase_ )
@unittest.skip('Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes' )
def UpperCAmelCase__ ( self :Any ) -> str:
pass
@unittest.skip('Wav2Vec2PhonemeTokenizer always puts spaces between phonemes' )
def UpperCAmelCase__ ( self :str ) -> List[str]:
pass
@unittest.skip('encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency' )
def UpperCAmelCase__ ( self :List[str] ) -> int:
pass
@unittest.skip('Wav2Vec2PhonemeModel has no max model length => no testing' )
def UpperCAmelCase__ ( self :List[Any] ) -> Optional[int]:
pass
def UpperCAmelCase__ ( self :int ) -> Optional[Any]:
UpperCAmelCase = self.get_tokenizers(do_lower_case=lowercase_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
UpperCAmelCase = tokenizer.vocab_size
UpperCAmelCase = len(lowercase_ )
self.assertNotEqual(lowercase_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
UpperCAmelCase = ['aaaaa bbbbbb', 'cccccccccdddddddd']
UpperCAmelCase = tokenizer.add_tokens(lowercase_ )
UpperCAmelCase = tokenizer.vocab_size
UpperCAmelCase = len(lowercase_ )
self.assertNotEqual(lowercase_ , 0 )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , len(lowercase_ ) )
self.assertEqual(lowercase_ , all_size + len(lowercase_ ) )
UpperCAmelCase = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=lowercase_ )
self.assertGreaterEqual(len(lowercase_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
UpperCAmelCase = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
UpperCAmelCase = tokenizer.add_special_tokens(lowercase_ )
UpperCAmelCase = tokenizer.vocab_size
UpperCAmelCase = len(lowercase_ )
self.assertNotEqual(lowercase_ , 0 )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , len(lowercase_ ) )
self.assertEqual(lowercase_ , all_size_a + len(lowercase_ ) )
UpperCAmelCase = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=lowercase_ )
self.assertGreaterEqual(len(lowercase_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
def UpperCAmelCase__ ( self :Tuple ) -> Optional[Any]:
pass
@unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
def UpperCAmelCase__ ( self :int ) -> Any:
pass
def UpperCAmelCase__ ( self :Tuple ) -> Dict:
# The default common tokenizer tests assume that the output of `convert_tokens_to_string` is a string which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
UpperCAmelCase = self.get_tokenizers(fast=lowercase_ , do_lower_case=lowercase_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
UpperCAmelCase = ['ð', 'ɪ', 's', 'ɪ', 'z', 'ɐ', 't', 'ɛ', 'k', 's', 't']
UpperCAmelCase = tokenizer.convert_tokens_to_string(lowercase_ )
self.assertIsInstance(output['text'] , lowercase_ )
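# Illustrative sketch (an addition, not part of the test file above): the char
# offsets asserted earlier are expressed in model frames, so a common pattern
# is to convert them to seconds via stride / sampling rate. The 320 / 16_000
# figure below is an assumed 20 ms frame rate, not a value taken from this file.
def _offsets_to_seconds(char_offsets, time_offset=320 / 16_000):
    return [
        {"char": d["char"], "start": d["start_offset"] * time_offset, "end": d["end_offset"] * time_offset}
        for d in char_offsets
    ]
# e.g. _offsets_to_seconds([{"char": "k", "start_offset": 0, "end_offset": 1}])
# returns [{'char': 'k', 'start': 0.0, 'end': 0.02}]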
| 181 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
_lowerCamelCase : Tuple = 'Create a default config file for Accelerate with only a few flags set.'
def lowercase_ ( _UpperCAmelCase="no" , _UpperCAmelCase = default_json_config_file , _UpperCAmelCase = False ):
"""simple docstring"""
A_ : str = Path(_UpperCAmelCase )
path.parent.mkdir(parents=_UpperCAmelCase , exist_ok=_UpperCAmelCase )
if path.exists():
print(
f"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
A_ : Optional[Any] = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
f"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
A_ : str = {
'''compute_environment''': '''LOCAL_MACHINE''',
'''mixed_precision''': mixed_precision,
}
if torch.cuda.is_available():
A_ : int = torch.cuda.device_count()
A_ : int = num_gpus
A_ : Tuple = False
if num_gpus > 1:
A_ : Optional[int] = '''MULTI_GPU'''
else:
A_ : Union[str, Any] = '''NO'''
elif is_xpu_available() and use_xpu:
A_ : str = torch.xpu.device_count()
A_ : Optional[int] = num_xpus
A_ : List[str] = False
if num_xpus > 1:
A_ : Any = '''MULTI_XPU'''
else:
A_ : Optional[Any] = '''NO'''
elif is_npu_available():
A_ : Union[str, Any] = torch.npu.device_count()
A_ : Optional[int] = num_npus
A_ : Union[str, Any] = False
if num_npus > 1:
A_ : List[str] = '''MULTI_NPU'''
else:
A_ : Tuple = '''NO'''
else:
A_ : Union[str, Any] = 0
A_ : str = True
A_ : str = 1
A_ : List[Any] = '''NO'''
A_ : Dict = ClusterConfig(**_UpperCAmelCase )
config.to_json_file(_UpperCAmelCase )
return path
def lowercase_ ( _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : List[str] = parser.add_parser('''default''' , parents=_UpperCAmelCase , help=_UpperCAmelCase , formatter_class=_UpperCAmelCase )
parser.add_argument(
'''--config_file''' , default=_UpperCAmelCase , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , dest='''save_location''' , )
parser.add_argument(
'''--mixed_precision''' , choices=['''no''', '''fp16''', '''bf16'''] , type=_UpperCAmelCase , help='''Whether or not to use mixed precision training. '''
'''Choose between FP16 and BF16 (bfloat16) training. '''
'''BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.''' , default='''no''' , )
parser.set_defaults(func=_UpperCAmelCase )
return parser
def lowercase_ ( _UpperCAmelCase ):
"""simple docstring"""
A_ : str = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(f"""accelerate configuration saved at {config_file}""" )
| 167 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : str = {
'facebook/convnextv2-tiny-1k-224': 'https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json',
}
class lowercase ( __UpperCAmelCase , __UpperCAmelCase):
__lowerCAmelCase : List[Any] = """convnextv2"""
def __init__( self : int , _lowerCamelCase : str=3 , _lowerCamelCase : str=4 , _lowerCamelCase : List[Any]=4 , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : List[Any]=None , _lowerCamelCase : Optional[int]="gelu" , _lowerCamelCase : Union[str, Any]=0.02 , _lowerCamelCase : List[str]=1E-12 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Optional[int]=2_24 , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Optional[Any]=None , **_lowerCamelCase : Optional[Any] , ):
"""simple docstring"""
super().__init__(**_lowerCamelCase )
A_ : str = num_channels
A_ : int = patch_size
A_ : Union[str, Any] = num_stages
A_ : Any = [96, 1_92, 3_84, 7_68] if hidden_sizes is None else hidden_sizes
A_ : Any = [3, 3, 9, 3] if depths is None else depths
A_ : Optional[int] = hidden_act
A_ : Tuple = initializer_range
A_ : int = layer_norm_eps
A_ : List[Any] = drop_path_rate
A_ : Union[str, Any] = image_size
A_ : Any = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
A_ , A_ : Tuple = get_aligned_output_features_output_indices(
out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names )
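# A minimal usage sketch, assuming the class above is exported under its name
# in the original transformers source, `ConvNextV2Config`:
#
#   config = ConvNextV2Config(depths=[2, 2, 6, 2], hidden_sizes=[40, 80, 160, 320])
#   assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]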
| 167 | 1 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase="shi-labs/oneformer_demo" ) -> List[Any]:
"""simple docstring"""
with open(hf_hub_download(repo_path , class_info_file , repo_type="""dataset""" ) , """r""" ) as f:
A : List[str] = json.load(f )
A : List[Any] = {}
A : Union[str, Any] = []
A : Tuple = []
for key, info in class_info.items():
A : List[Any] = info["""name"""]
class_names.append(info["""name"""] )
if info["isthing"]:
thing_ids.append(int(A__ ) )
A : List[Any] = thing_ids
A : Tuple = class_names
return metadata
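# A hedged example of what `prepare_metadata` returns for a two-class info
# file; the class names are made up, and the key layout mirrors the original
# transformers test helper:
#
#   {"0": "wall", "1": "person", "thing_ids": [1], "class_names": ["wall", "person"]}
#
# (only "person" is marked `isthing`, so only its id lands in `thing_ids`)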
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__=7, lowerCamelCase__=3, lowerCamelCase__=30, lowerCamelCase__=400, lowerCamelCase__=None, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=[0.5, 0.5, 0.5], lowerCamelCase__=[0.5, 0.5, 0.5], lowerCamelCase__=10, lowerCamelCase__=False, lowerCamelCase__=255, lowerCamelCase__="shi-labs/oneformer_demo", lowerCamelCase__="ade20k_panoptic.json", lowerCamelCase__=10, ):
A : List[Any] = parent
A : str = batch_size
A : int = num_channels
A : Optional[Any] = min_resolution
A : int = max_resolution
A : Optional[Any] = do_resize
A : Optional[Any] = {"""shortest_edge""": 32, """longest_edge""": 1333} if size is None else size
A : Any = do_normalize
A : Any = image_mean
A : List[Any] = image_std
A : Any = class_info_file
A : List[Any] = prepare_metadata(UpperCamelCase_, UpperCamelCase_ )
A : Dict = num_text
A : Tuple = repo_path
# for the post_process_functions
A : Tuple = 2
A : Dict = 10
A : List[str] = 10
A : Optional[int] = 3
A : Union[str, Any] = 4
A : Tuple = num_labels
A : Union[str, Any] = do_reduce_labels
A : Any = ignore_index
def _lowerCAmelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__=False ):
if not batched:
A : Dict = image_inputs[0]
if isinstance(UpperCamelCase_, Image.Image ):
A , A : Tuple = image.size
else:
A , A : Optional[Any] = image.shape[1], image.shape[2]
if w < h:
A : List[str] = int(self.size["""shortest_edge"""] * h / w )
A : List[Any] = self.size["""shortest_edge"""]
elif w > h:
A : Optional[int] = self.size["""shortest_edge"""]
A : Tuple = int(self.size["""shortest_edge"""] * w / h )
else:
A : Union[str, Any] = self.size["""shortest_edge"""]
A : Optional[Any] = self.size["""shortest_edge"""]
else:
A : List[str] = []
for image in image_inputs:
A , A : Dict = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A : Dict = max(UpperCamelCase_, key=lambda lowerCamelCase__ : item[0] )[0]
A : str = max(UpperCamelCase_, key=lambda lowerCamelCase__ : item[1] )[1]
return expected_height, expected_width
def _lowerCAmelCase ( self ):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ), masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ), )
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
__lowerCamelCase : Any = image_processing_class
def _lowerCAmelCase ( self ):
A : int = OneFormerImageProcessorTester(self )
@property
def _lowerCAmelCase ( self ):
return self.image_processing_tester.prepare_image_processor_dict()
def _lowerCAmelCase ( self ):
A : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_, """image_mean""" ) )
self.assertTrue(hasattr(UpperCamelCase_, """image_std""" ) )
self.assertTrue(hasattr(UpperCamelCase_, """do_normalize""" ) )
self.assertTrue(hasattr(UpperCamelCase_, """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase_, """size""" ) )
self.assertTrue(hasattr(UpperCamelCase_, """ignore_index""" ) )
self.assertTrue(hasattr(UpperCamelCase_, """class_info_file""" ) )
self.assertTrue(hasattr(UpperCamelCase_, """num_text""" ) )
self.assertTrue(hasattr(UpperCamelCase_, """repo_path""" ) )
self.assertTrue(hasattr(UpperCamelCase_, """metadata""" ) )
self.assertTrue(hasattr(UpperCamelCase_, """do_reduce_labels""" ) )
def _lowerCAmelCase ( self ):
pass
def _lowerCAmelCase ( self ):
# Initialize image_processor
A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A : Dict = prepare_image_inputs(self.image_processing_tester, equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_, Image.Image )
# Test not batched input
A : str = image_processor(image_inputs[0], ["""semantic"""], return_tensors="""pt""" ).pixel_values
A , A : Any = self.image_processing_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
# Test batched
A , A : str = self.image_processing_tester.get_expected_values(UpperCamelCase_, batched=UpperCamelCase_ )
A : Dict = image_processor(
UpperCamelCase_, ["""semantic"""] * len(UpperCamelCase_ ), return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
), )
def _lowerCAmelCase ( self ):
# Initialize image_processor
A : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A : List[str] = prepare_image_inputs(self.image_processing_tester, equal_resolution=UpperCamelCase_, numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_, np.ndarray )
# Test not batched input
A : Any = image_processor(image_inputs[0], ["""semantic"""], return_tensors="""pt""" ).pixel_values
A , A : Tuple = self.image_processing_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
# Test batched
A , A : Tuple = self.image_processing_tester.get_expected_values(UpperCamelCase_, batched=UpperCamelCase_ )
A : Any = image_processor(
UpperCamelCase_, ["""semantic"""] * len(UpperCamelCase_ ), return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
), )
def _lowerCAmelCase ( self ):
# Initialize image_processor
A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A : Union[str, Any] = prepare_image_inputs(self.image_processing_tester, equal_resolution=UpperCamelCase_, torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_, torch.Tensor )
# Test not batched input
A : List[Any] = image_processor(image_inputs[0], ["""semantic"""], return_tensors="""pt""" ).pixel_values
A , A : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
# Test batched
A , A : List[Any] = self.image_processing_tester.get_expected_values(UpperCamelCase_, batched=UpperCamelCase_ )
A : Dict = image_processor(
UpperCamelCase_, ["""semantic"""] * len(UpperCamelCase_ ), return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
), )
def _lowerCAmelCase ( self, lowerCamelCase__=False, lowerCamelCase__=False, lowerCamelCase__="np" ):
A : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
A : Any = self.image_processing_tester.num_labels
A : int = None
A : List[Any] = None
A : int = prepare_image_inputs(self.image_processing_tester, equal_resolution=UpperCamelCase_ )
if with_segmentation_maps:
A : str = num_labels
if is_instance_map:
A : Dict = list(range(UpperCamelCase_ ) ) * 2
A : Any = dict(enumerate(UpperCamelCase_ ) )
A : Any = [
np.random.randint(0, high * 2, (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
A : Dict = [Image.fromarray(UpperCamelCase_ ) for annotation in annotations]
A : Union[str, Any] = image_processor(
UpperCamelCase_, ["""semantic"""] * len(UpperCamelCase_ ), UpperCamelCase_, return_tensors="""pt""", instance_id_to_semantic_id=UpperCamelCase_, pad_and_return_pixel_mask=UpperCamelCase_, )
return inputs
def _lowerCAmelCase ( self ):
pass
def _lowerCAmelCase ( self ):
def common(lowerCamelCase__=False, lowerCamelCase__=None ):
A : Optional[Any] = self.comm_get_image_processor_inputs(
with_segmentation_maps=UpperCamelCase_, is_instance_map=UpperCamelCase_, segmentation_type=UpperCamelCase_ )
A : Tuple = inputs["""mask_labels"""]
A : Optional[int] = inputs["""class_labels"""]
A : Dict = inputs["""pixel_values"""]
A : Optional[Any] = inputs["""text_inputs"""]
# check the batch_size
for mask_label, class_label, text_input in zip(UpperCamelCase_, UpperCamelCase_, UpperCamelCase_ ):
self.assertEqual(mask_label.shape[0], class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:] )
self.assertEqual(len(UpperCamelCase_ ), self.image_processing_tester.num_text )
common()
common(is_instance_map=UpperCamelCase_ )
common(is_instance_map=UpperCamelCase_, segmentation_type="""pil""" )
common(is_instance_map=UpperCamelCase_, segmentation_type="""pil""" )
def _lowerCAmelCase ( self ):
A : Optional[int] = np.zeros((20, 50) )
A : Tuple = 1
A : Optional[int] = 1
A : Any = 1
A : Optional[Any] = binary_mask_to_rle(UpperCamelCase_ )
self.assertEqual(len(UpperCamelCase_ ), 4 )
self.assertEqual(rle[0], 21 )
self.assertEqual(rle[1], 45 )
def _lowerCAmelCase ( self ):
A : Tuple = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="""ade20k_panoptic.json""", num_text=self.image_processing_tester.num_text, repo_path="""shi-labs/oneformer_demo""", )
A : int = self.image_processing_tester.get_fake_oneformer_outputs()
A : Any = image_processor.post_process_semantic_segmentation(UpperCamelCase_ )
self.assertEqual(len(UpperCamelCase_ ), self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape, (
self.image_processing_tester.height,
self.image_processing_tester.width,
), )
A : List[Any] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
A : Tuple = image_processor.post_process_semantic_segmentation(UpperCamelCase_, target_sizes=UpperCamelCase_ )
self.assertEqual(segmentation[0].shape, target_sizes[0] )
def _lowerCAmelCase ( self ):
A : List[str] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="""ade20k_panoptic.json""", num_text=self.image_processing_tester.num_text, repo_path="""shi-labs/oneformer_demo""", )
A : Dict = self.image_processing_tester.get_fake_oneformer_outputs()
A : Optional[int] = image_processor.post_process_instance_segmentation(UpperCamelCase_, threshold=0 )
self.assertTrue(len(UpperCamelCase_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ), UpperCamelCase_ )
self.assertEqual(
el["""segmentation"""].shape, (self.image_processing_tester.height, self.image_processing_tester.width) )
def _lowerCAmelCase ( self ):
A : List[str] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="""ade20k_panoptic.json""", num_text=self.image_processing_tester.num_text, repo_path="""shi-labs/oneformer_demo""", )
A : List[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
A : int = image_processor.post_process_panoptic_segmentation(UpperCamelCase_, threshold=0 )
self.assertTrue(len(UpperCamelCase_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ), UpperCamelCase_ )
self.assertEqual(
el["""segmentation"""].shape, (self.image_processing_tester.height, self.image_processing_tester.width) )
| 357 |
import os
from pathlib import Path
def __UpperCamelCase ( ) -> Any:
"""simple docstring"""
from torch.utils.cpp_extension import load
A : Any = Path(__file__ ).resolve().parent.parent.parent / """kernels""" / """deformable_detr"""
A : int = [
root / filename
for filename in [
"""vision.cpp""",
os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ),
os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ),
]
]
load(
"""MultiScaleDeformableAttention""" , _lowerCAmelCase , with_cuda=_lowerCAmelCase , extra_include_paths=[str(_lowerCAmelCase )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[
"""-DCUDA_HAS_FP16=1""",
"""-D__CUDA_NO_HALF_OPERATORS__""",
"""-D__CUDA_NO_HALF_CONVERSIONS__""",
"""-D__CUDA_NO_HALF2_OPERATORS__""",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
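# Usage sketch: the loader above JIT-compiles the extension on first use, so
# callers typically guard it and fall back to the pure-PyTorch attention path
# (the helper is named `load_cuda_kernels` in the original transformers source):
#
#   try:
#       MSDA = load_cuda_kernels()
#   except Exception:
#       MSDA = None  # no compiler or CUDA toolkit available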
| 115 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ = logging.get_logger(__name__)
def _lowerCAmelCase ( lowercase_ , lowercase_=False , lowercase_=False , lowercase_=False ):
UpperCAmelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
for i in range(config.num_hidden_layers ):
UpperCAmelCase = '''vilt.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" )
UpperCAmelCase = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase = in_proj_bias[: config.hidden_size]
UpperCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase = in_proj_bias[-config.hidden_size :]
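# A standalone sketch (not used by the conversion) of the split that
# `read_in_q_k_v` performs above: the fused in-projection weight stacks the
# query, key and value blocks of `hidden_size` rows each, in that order.
def _toy_qkv_split_demo(hidden_size=2):
    import torch

    qkv = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
    q = qkv[:hidden_size, :]                   # first block  -> query
    k = qkv[hidden_size : hidden_size * 2, :]  # middle block -> key
    v = qkv[-hidden_size:, :]                  # last block   -> value
    assert torch.equal(torch.cat([q, k, v]), qkv)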
def _lowerCAmelCase ( lowercase_ ):
UpperCAmelCase = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(UpperCAmelCase_ , UpperCAmelCase_ )
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ):
UpperCAmelCase = dct.pop(UpperCAmelCase_ )
UpperCAmelCase = val
@torch.no_grad()
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
UpperCAmelCase = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=UpperCAmelCase_ )
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
if "vqa" in checkpoint_url:
UpperCAmelCase = True
UpperCAmelCase = 3129
UpperCAmelCase = '''huggingface/label-files'''
UpperCAmelCase = '''vqa2-id2label.json'''
UpperCAmelCase = json.load(open(hf_hub_download(UpperCAmelCase_ , UpperCAmelCase_ , repo_type='dataset' ) , 'r' ) )
UpperCAmelCase = {int(UpperCAmelCase_ ): v for k, v in idalabel.items()}
UpperCAmelCase = idalabel
UpperCAmelCase = {v: k for k, v in idalabel.items()}
UpperCAmelCase = ViltForQuestionAnswering(UpperCAmelCase_ )
elif "nlvr" in checkpoint_url:
UpperCAmelCase = True
UpperCAmelCase = 2
UpperCAmelCase = {0: '''False''', 1: '''True'''}
UpperCAmelCase = {v: k for k, v in config.idalabel.items()}
UpperCAmelCase = 3
UpperCAmelCase = ViltForImagesAndTextClassification(UpperCAmelCase_ )
elif "irtr" in checkpoint_url:
UpperCAmelCase = True
UpperCAmelCase = ViltForImageAndTextRetrieval(UpperCAmelCase_ )
elif "mlm_itm" in checkpoint_url:
UpperCAmelCase = True
UpperCAmelCase = ViltForMaskedLM(UpperCAmelCase_ )
else:
raise ValueError('Unknown model type' )
# load state_dict of original model, remove and rename some keys
UpperCAmelCase = torch.hub.load_state_dict_from_url(UpperCAmelCase_ , map_location='cpu' )['''state_dict''']
UpperCAmelCase = create_rename_keys(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
for src, dest in rename_keys:
rename_key(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
read_in_q_k_v(UpperCAmelCase_ , UpperCAmelCase_ )
if mlm_model or irtr_model:
UpperCAmelCase = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
for k in ignore_keys:
state_dict.pop(UpperCAmelCase_ , UpperCAmelCase_ )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
UpperCAmelCase = model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(UpperCAmelCase_ )
# Define processor
UpperCAmelCase = ViltImageProcessor(size=384 )
UpperCAmelCase = BertTokenizer.from_pretrained('bert-base-uncased' )
UpperCAmelCase = ViltProcessor(UpperCAmelCase_ , UpperCAmelCase_ )
# Forward pass on example inputs (image + text)
if nlvr_model:
UpperCAmelCase = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=UpperCAmelCase_ ).raw )
UpperCAmelCase = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=UpperCAmelCase_ ).raw )
UpperCAmelCase = (
'''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
''' standing.'''
)
UpperCAmelCase = processor(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors='pt' )
UpperCAmelCase = processor(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors='pt' )
UpperCAmelCase = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
UpperCAmelCase = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=UpperCAmelCase_ ).raw )
if mlm_model:
UpperCAmelCase = '''a bunch of [MASK] laying on a [MASK].'''
else:
UpperCAmelCase = '''How many cats are there?'''
UpperCAmelCase = processor(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors='pt' )
UpperCAmelCase = model(**UpperCAmelCase_ )
# Verify outputs
if mlm_model:
UpperCAmelCase = torch.Size([1, 11, 30522] )
UpperCAmelCase = torch.tensor([-1_2.5_0_6_1, -1_2.5_1_2_3, -1_2.5_1_7_4] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , UpperCAmelCase_ , atol=1e-4 )
# verify masked token prediction equals "cats"
UpperCAmelCase = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
UpperCAmelCase = torch.Size([1, 3129] )
UpperCAmelCase = torch.tensor([-1_5.9_4_9_5, -1_8.1_4_7_2, -1_0.3_0_4_1] )
assert torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4 )
assert outputs.logits.shape == expected_shape
# verify vqa prediction equals "2"
UpperCAmelCase = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
UpperCAmelCase = torch.Size([1, 2] )
UpperCAmelCase = torch.tensor([-2.8_7_2_1, 2.1_2_9_1] )
assert torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(UpperCAmelCase_ ).mkdir(exist_ok=UpperCAmelCase_ )
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCAmelCase_ )
processor.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt""",
type=str,
help="""URL of the checkpoint you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
snake_case_ = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 78 |
from __future__ import annotations
def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : list[str] | None = None , UpperCAmelCase_ : dict[str, float] | None = None , UpperCAmelCase_ : bool = False , ):
"""simple docstring"""
a :str = cipher_alphabet or [chr(UpperCAmelCase_ ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
a :List[Any] = {
'''a''': 0.08497,
'''b''': 0.01492,
'''c''': 0.02202,
'''d''': 0.04253,
'''e''': 0.11162,
'''f''': 0.02228,
'''g''': 0.02015,
'''h''': 0.06094,
'''i''': 0.07546,
'''j''': 0.00153,
'''k''': 0.01292,
'''l''': 0.04025,
'''m''': 0.02406,
'''n''': 0.06749,
'''o''': 0.07507,
'''p''': 0.01929,
'''q''': 0.00095,
'''r''': 0.07587,
'''s''': 0.06327,
'''t''': 0.09356,
'''u''': 0.02758,
'''v''': 0.00978,
'''w''': 0.02560,
'''x''': 0.00150,
'''y''': 0.01994,
'''z''': 0.00077,
}
else:
# Custom frequencies dictionary
a :Dict = frequencies_dict
if not case_sensitive:
a :Union[str, Any] = ciphertext.lower()
# Chi squared statistic values
a :dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
for shift in range(len(UpperCAmelCase_ ) ):
a :int = ''''''
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
a :Dict = (alphabet_letters.index(letter.lower() ) - shift) % len(
UpperCAmelCase_ )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
a :List[Any] = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
a :Optional[int] = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
a :List[Any] = decrypted_with_shift.lower().count(UpperCAmelCase_ )
# Get the expected amount of times the letter should appear based
# on letter frequencies
a :Dict = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
a :Any = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
a :int = decrypted_with_shift.count(UpperCAmelCase_ )
# Get the expected amount of times the letter should appear based
# on letter frequencies
a :Tuple = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
a :Optional[Any] = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
a :Optional[Any] = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(key : int ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
a :int = min(
UpperCAmelCase_ , key=UpperCAmelCase_ , )
# Get all the data from the most likely cipher (key, decoded message)
most_likely_cipher_chi_squared_value , decoded_most_likely_cipher = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
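# Usage sketch for the solver above (named `decrypt_caesar_with_chi_squared`
# in the original source). It returns `(shift, chi_squared_value, decoded_text)`;
# the frequency statistics are noisy on short inputs, so longer ciphertexts
# decode more reliably. The ciphertext below is "the quick brown fox" shifted
# by 10:
#
#   shift, score, plaintext = decrypt_caesar_with_chi_squared("dro aesmu lbygx pyh")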
| 94 | 0 |
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = 0
snake_case = [0]
snake_case = [0]
snake_case = len(_lowerCamelCase )
self.assertEqual(k.knapsack(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase ), 0 )
snake_case = [60]
snake_case = [10]
snake_case = len(_lowerCamelCase )
self.assertEqual(k.knapsack(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase ), 0 )
def _lowerCamelCase ( self ) -> Union[str, Any]:
snake_case = 3
snake_case = [1, 2, 3]
snake_case = [3, 2, 1]
snake_case = len(_lowerCamelCase )
self.assertEqual(k.knapsack(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase ), 5 )
def _lowerCamelCase ( self ) -> str:
snake_case = 50
snake_case = [60, 100, 120]
snake_case = [10, 20, 30]
snake_case = len(_lowerCamelCase )
self.assertEqual(k.knapsack(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase ), 220 )
if __name__ == "__main__":
unittest.main()
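# A standalone sketch of the 0/1 knapsack recurrence these tests exercise (the
# argument order of `k.knapsack` itself is obscured by the fixtures above):
def _knapsack_dp(capacity, weights, values):
    best = [0] * (capacity + 1)
    for w, v in zip(weights, values):
        for c in range(capacity, w - 1, -1):  # descend so each item is taken at most once
            best[c] = max(best[c], best[c - w] + v)
    return best[capacity]
# _knapsack_dp(50, [10, 20, 30], [60, 100, 120]) == 220, matching the last test case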
| 368 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase ( metaclass=__lowerCAmelCase ):
snake_case_ = ['''note_seq''']
def __init__( self, *lowercase_, **lowercase_ ) -> str:
requires_backends(self, ['note_seq'] )
@classmethod
def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
requires_backends(cls, ['note_seq'] )
@classmethod
def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
requires_backends(cls, ['note_seq'] )
| 332 | 0 |
from __future__ import annotations
import numpy as np
def lowerCAmelCase_ ( __UpperCAmelCase: list[float] ) -> np.ndarray:
return np.maximum(0 , __UpperCAmelCase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 201 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def lowerCAmelCase_ ( __UpperCAmelCase: str , __UpperCAmelCase: str , __UpperCAmelCase: str , __UpperCAmelCase: PreTrainedTokenizer , __UpperCAmelCase: int , __UpperCAmelCase: Optional[int] = None , ) -> List[Any]:
UpperCamelCase__ : Dict = {}
if train_file is not None:
UpperCamelCase__ : str = [train_file]
if eval_file is not None:
UpperCamelCase__ : Union[str, Any] = [eval_file]
if test_file is not None:
UpperCamelCase__ : Tuple = [test_file]
UpperCamelCase__ : Optional[Any] = datasets.load_dataset('''csv''' , data_files=__UpperCAmelCase )
UpperCamelCase__ : List[Any] = list(ds[list(files.keys() )[0]].features.keys() )
UpperCamelCase__ : str = features_name.pop(__UpperCAmelCase )
UpperCamelCase__ : List[str] = list(set(ds[list(files.keys() )[0]][label_name] ) )
UpperCamelCase__ : Optional[Any] = {label: i for i, label in enumerate(__UpperCAmelCase )}
UpperCamelCase__ : Union[str, Any] = tokenizer.model_input_names
UpperCamelCase__ : str = {}
if len(__UpperCAmelCase ) == 1:
for k in files.keys():
UpperCamelCase__ : Optional[int] = ds[k].map(
lambda __UpperCAmelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='''max_length''' ) , batched=__UpperCAmelCase , )
elif len(__UpperCAmelCase ) == 2:
for k in files.keys():
UpperCamelCase__ : Dict = ds[k].map(
lambda __UpperCAmelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='''max_length''' , ) , batched=__UpperCAmelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
UpperCamelCase__ : Any = {k: v for k, v in ex.items() if k in input_names}
UpperCamelCase__ : str = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
UpperCamelCase__ : Union[str, Any] = {k: v for k, v in ex.items() if k in input_names}
UpperCamelCase__ : str = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
UpperCamelCase__ : Optional[Any] = {k: v for k, v in ex.items() if k in input_names}
UpperCamelCase__ : int = labelaid[ex[label_name]]
yield (d, label)
UpperCamelCase__ : Tuple = (
tf.data.Dataset.from_generator(
__UpperCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
UpperCamelCase__ : Optional[Any] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
UpperCamelCase__ : int = (
tf.data.Dataset.from_generator(
__UpperCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
UpperCamelCase__ : Dict = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
UpperCamelCase__ : Optional[Any] = (
tf.data.Dataset.from_generator(
__UpperCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
UpperCamelCase__ : Union[str, Any] = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
UpperCAmelCase_ = logging.getLogger(__name__)
@dataclass
class lowercase__ :
'''simple docstring'''
a : int = field(metadata={"help": "Which column contains the label"} )
a : str = field(default=__lowerCamelCase , metadata={"help": "The path of the training file"} )
a : Optional[str] = field(default=__lowerCamelCase , metadata={"help": "The path of the development file"} )
a : Optional[str] = field(default=__lowerCamelCase , metadata={"help": "The path of the test file"} )
a : int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
a : bool = field(
default=__lowerCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class lowercase__ :
'''simple docstring'''
a : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
a : Optional[str] = field(
default=__lowerCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
a : Optional[str] = field(
default=__lowerCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
a : bool = field(default=__lowerCamelCase , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
a : Optional[str] = field(
default=__lowerCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def lowerCAmelCase_ ( ) -> int:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase__ : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Any = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, "
f"16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase__ : List[str] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Optional[int] = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__UpperCAmelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
UpperCamelCase__ : int = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__UpperCAmelCase ) , labelaid=__UpperCAmelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
UpperCamelCase__ : str = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(__UpperCAmelCase: EvalPrediction ) -> Dict:
UpperCamelCase__ : Optional[Any] = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
UpperCamelCase__ : Union[str, Any] = TFTrainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=__UpperCAmelCase , eval_dataset=__UpperCAmelCase , compute_metrics=__UpperCAmelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCamelCase__ : List[str] = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCamelCase__ : Tuple = trainer.evaluate()
UpperCamelCase__ : Optional[int] = os.path.join(training_args.output_dir , '''eval_results.txt''' )
with open(__UpperCAmelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(f" {key} = {value}" )
writer.write(f"{key} = {value}\n" )
results.update(__UpperCAmelCase )
return results
if __name__ == "__main__":
main()
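# A hedged invocation sketch for this script; the flag names mirror the
# dataclass fields above and the script filename is assumed. Each CSV is
# expected to hold a label column plus one or two text columns:
#
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --train_file train.csv --dev_file dev.csv --test_file test.csv \
#       --label_column_id 0 \
#       --output_dir ./tf_clf_out --do_train --do_eval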
| 201 | 1 |
import math
from numpy import inf
from scipy.integrate import quad
def UpperCAmelCase__ ( lowerCamelCase ):
    if lowerCamelCase <= 0:
        raise ValueError("math domain error" )
    return quad(integrand , 0 , inf , args=(lowerCamelCase,) )[0]


def integrand ( x , z ):
    return math.pow(x , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 158 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def UpperCAmelCase__ ( lowerCamelCase ):
if is_torch_version("<", "2.0.0" ) or not hasattr(lowerCamelCase, "_dynamo" ):
return False
return isinstance(lowerCamelCase, torch._dynamo.eval_frame.OptimizedModule )
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase = True ):
lowercase :Optional[Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
lowercase :str = is_compiled_module(lowerCamelCase )
if is_compiled:
lowercase :str = model
lowercase :str = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(lowerCamelCase, lowerCamelCase ):
lowercase :Any = model.module
if not keep_fpaa_wrapper:
lowercase :List[Any] = getattr(lowerCamelCase, "forward" )
lowercase :Union[str, Any] = model.__dict__.pop("_original_forward", lowerCamelCase )
if original_forward is not None:
while hasattr(lowerCamelCase, "__wrapped__" ):
lowercase :Tuple = forward.__wrapped__
if forward == original_forward:
break
lowercase :Tuple = forward
if getattr(lowerCamelCase, "_converted_to_transformer_engine", lowerCamelCase ):
convert_model(lowerCamelCase, to_transformer_engine=lowerCamelCase )
if is_compiled:
lowercase :List[Any] = model
lowercase :Optional[int] = compiled_model
return model
def UpperCAmelCase__ ( ):
PartialState().wait_for_everyone()
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(lowerCamelCase, lowerCamelCase )
elif PartialState().local_process_index == 0:
torch.save(lowerCamelCase, lowerCamelCase )
@contextmanager
def UpperCAmelCase__ ( **lowerCamelCase ):
for key, value in kwargs.items():
lowercase :List[str] = str(lowerCamelCase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def UpperCAmelCase__ ( lowerCamelCase ):
if not hasattr(lowerCamelCase, "__qualname__" ) and not hasattr(lowerCamelCase, "__name__" ):
lowercase :Optional[int] = getattr(lowerCamelCase, "__class__", lowerCamelCase )
if hasattr(lowerCamelCase, "__qualname__" ):
return obj.__qualname__
if hasattr(lowerCamelCase, "__name__" ):
return obj.__name__
return str(lowerCamelCase )
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase ):
for key, value in source.items():
if isinstance(lowerCamelCase, lowerCamelCase ):
lowercase :Tuple = destination.setdefault(lowerCamelCase, {} )
merge_dicts(lowerCamelCase, lowerCamelCase )
else:
lowercase :Optional[Any] = value
return destination
def UpperCAmelCase__ ( lowerCamelCase = None ):
if port is None:
lowercase :Tuple = 29500
with socket.socket(socket.AF_INET, socket.SOCK_STREAM ) as s:
return s.connect_ex(("localhost", port) ) == 0
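# Usage sketch for the environment-patching context manager defined above
# (named `patch_environment` in the original accelerate source): overrides
# exist only inside the block and are deleted, not restored, on exit.
#
#   with patch_environment(CUDA_VISIBLE_DEVICES="0"):
#       ...  # code here sees CUDA_VISIBLE_DEVICES=0
#   # the variable is gone again here, even if it was set before the block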
| 158 | 1 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_a = logging.get_logger(__name__)
# General docstring
_a = '''RegNetConfig'''
# Base docstring
_a = '''facebook/regnet-y-040'''
_a = [1, 1088, 7, 7]
# Image classification docstring
_a = '''facebook/regnet-y-040'''
_a = '''tabby, tabby cat'''
_a = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __lowerCamelCase ( nn.Module):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 3 , UpperCAmelCase = 1 , UpperCAmelCase = 1 , UpperCAmelCase = "relu" , ):
"""simple docstring"""
super().__init__()
        _UpperCAmelCase = nn.Conv2d(
            lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=lowerCAmelCase__ , stride=lowerCAmelCase__ , padding=kernel_size // 2 , groups=lowerCAmelCase__ , bias=lowerCAmelCase__ , )
        _UpperCAmelCase = nn.BatchNorm2d(lowerCAmelCase__ )
        _UpperCAmelCase = ACT2FN[activation] if activation is not None else nn.Identity()
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self.convolution(lowerCAmelCase__ )
_UpperCAmelCase = self.normalization(lowerCAmelCase__ )
_UpperCAmelCase = self.activation(lowerCAmelCase__ )
return hidden_state
class __lowerCamelCase ( nn.Module):
"""simple docstring"""
def __init__( self , UpperCAmelCase ):
"""simple docstring"""
super().__init__()
_UpperCAmelCase = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
_UpperCAmelCase = config.num_channels
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
_UpperCAmelCase = self.embedder(lowerCAmelCase__ )
return hidden_state
class __lowerCamelCase ( nn.Module):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 2 ):
"""simple docstring"""
super().__init__()
        _UpperCAmelCase = nn.Conv2d(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , stride=lowerCAmelCase__ , bias=lowerCAmelCase__ )
        _UpperCAmelCase = nn.BatchNorm2d(lowerCAmelCase__ )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self.convolution(lowerCAmelCase__ )
_UpperCAmelCase = self.normalization(lowerCAmelCase__ )
return hidden_state
class __lowerCamelCase ( nn.Module):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
super().__init__()
        _UpperCAmelCase = nn.AdaptiveAvgPool2d((1, 1) )
        _UpperCAmelCase = nn.Sequential(
            nn.Conv2d(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 ) , nn.ReLU() , nn.Conv2d(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 ) , nn.Sigmoid() , )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self.pooler(lowerCAmelCase__ )
_UpperCAmelCase = self.attention(lowerCAmelCase__ )
_UpperCAmelCase = hidden_state * attention
return hidden_state
class __lowerCamelCase ( nn.Module):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 1 ):
"""simple docstring"""
super().__init__()
_UpperCAmelCase = in_channels != out_channels or stride != 1
_UpperCAmelCase = max(1 , out_channels // config.groups_width )
_UpperCAmelCase = (
RegNetShortCut(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) if should_apply_shortcut else nn.Identity()
)
_UpperCAmelCase = nn.Sequential(
RegNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ , groups=lowerCAmelCase__ , activation=config.hidden_act ) , RegNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , activation=lowerCAmelCase__ ) , )
        _UpperCAmelCase = ACT2FN[config.hidden_act]
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = hidden_state
_UpperCAmelCase = self.layer(lowerCAmelCase__ )
_UpperCAmelCase = self.shortcut(lowerCAmelCase__ )
hidden_state += residual
_UpperCAmelCase = self.activation(lowerCAmelCase__ )
return hidden_state
class __lowerCamelCase ( nn.Module):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 1 ):
"""simple docstring"""
super().__init__()
_UpperCAmelCase = in_channels != out_channels or stride != 1
_UpperCAmelCase = max(1 , out_channels // config.groups_width )
_UpperCAmelCase = (
RegNetShortCut(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) if should_apply_shortcut else nn.Identity()
)
_UpperCAmelCase = nn.Sequential(
RegNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ , groups=lowerCAmelCase__ , activation=config.hidden_act ) , RegNetSELayer(lowerCAmelCase__ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , activation=lowerCAmelCase__ ) , )
        _UpperCAmelCase = ACT2FN[config.hidden_act]
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = hidden_state
_UpperCAmelCase = self.layer(lowerCAmelCase__ )
_UpperCAmelCase = self.shortcut(lowerCAmelCase__ )
hidden_state += residual
_UpperCAmelCase = self.activation(lowerCAmelCase__ )
return hidden_state
class __lowerCamelCase ( nn.Module):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 2 , UpperCAmelCase = 2 , ):
"""simple docstring"""
super().__init__()
_UpperCAmelCase = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
_UpperCAmelCase = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ , ) , *[layer(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) for _ in range(depth - 1 )] , )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self.layers(lowerCAmelCase__ )
return hidden_state
class __lowerCamelCase ( nn.Module):
"""simple docstring"""
def __init__( self , UpperCAmelCase ):
"""simple docstring"""
super().__init__()
_UpperCAmelCase = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowerCAmelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_UpperCAmelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowerCAmelCase__ , config.depths[1:] ):
self.stages.append(RegNetStage(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , depth=lowerCAmelCase__ ) )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = False , UpperCAmelCase = True ):
"""simple docstring"""
_UpperCAmelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_UpperCAmelCase = hidden_states + (hidden_state,)
_UpperCAmelCase = stage_module(lowerCAmelCase__ )
if output_hidden_states:
_UpperCAmelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowerCAmelCase__ , hidden_states=lowerCAmelCase__ )
class __lowerCamelCase ( __UpperCamelCase):
"""simple docstring"""
UpperCamelCase__ = RegNetConfig
UpperCamelCase__ = 'regnet'
UpperCamelCase__ = 'pixel_values'
UpperCamelCase__ = True
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
        if isinstance(lowerCAmelCase__ , nn.Conv2d ):
            nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
        elif isinstance(lowerCAmelCase__ , (nn.BatchNorm2d, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase=False ):
"""simple docstring"""
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase = value
_a = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
_a = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , __UpperCamelCase , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __lowerCamelCase ( __UpperCamelCase):
"""simple docstring"""
def __init__( self , UpperCAmelCase ):
"""simple docstring"""
super().__init__(lowerCAmelCase__ )
_UpperCAmelCase = config
_UpperCAmelCase = RegNetEmbeddings(lowerCAmelCase__ )
_UpperCAmelCase = RegNetEncoder(lowerCAmelCase__ )
        _UpperCAmelCase = nn.AdaptiveAvgPool2d((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None ):
"""simple docstring"""
_UpperCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase = self.embedder(lowerCAmelCase__ )
_UpperCAmelCase = self.encoder(
lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
_UpperCAmelCase = encoder_outputs[0]
_UpperCAmelCase = self.pooler(lowerCAmelCase__ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase__ , pooler_output=lowerCAmelCase__ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , __UpperCamelCase , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __lowerCamelCase ( __UpperCamelCase):
"""simple docstring"""
def __init__( self , UpperCAmelCase ):
"""simple docstring"""
super().__init__(lowerCAmelCase__ )
_UpperCAmelCase = config.num_labels
_UpperCAmelCase = RegNetModel(lowerCAmelCase__ )
# classification head
_UpperCAmelCase = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase ( self , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , ):
"""simple docstring"""
_UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase = self.regnet(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
_UpperCAmelCase = outputs.pooler_output if return_dict else outputs[1]
_UpperCAmelCase = self.classifier(lowerCAmelCase__ )
_UpperCAmelCase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_UpperCAmelCase = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_UpperCAmelCase = 'single_label_classification'
else:
_UpperCAmelCase = 'multi_label_classification'
if self.config.problem_type == "regression":
_UpperCAmelCase = MSELoss()
if self.num_labels == 1:
_UpperCAmelCase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_UpperCAmelCase = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ )
elif self.config.problem_type == "single_label_classification":
_UpperCAmelCase = CrossEntropyLoss()
_UpperCAmelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_UpperCAmelCase = BCEWithLogitsLoss()
_UpperCAmelCase = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ )
if not return_dict:
_UpperCAmelCase = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase__ , logits=lowerCAmelCase__ , hidden_states=outputs.hidden_states )
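# A minimal inference sketch for the classification model above, written against the
# public `transformers` API that this (identifier-mangled) file corresponds to; the
# checkpoint name comes from the docstring constants near the top of the file:
#
#     from transformers import AutoImageProcessor, RegNetForImageClassification
#     from PIL import Image
#
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(images=Image.open("cat.jpg"), return_tensors="pt")  # "cat.jpg" is a placeholder
#     logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])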
| 39 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
__lowerCAmelCase = logging.get_logger(__name__)
# General docstring
__lowerCAmelCase = '''RegNetConfig'''
# Base docstring
__lowerCAmelCase = '''facebook/regnet-y-040'''
__lowerCAmelCase = [1, 10_88, 7, 7]
# Image classification docstring
__lowerCAmelCase = '''facebook/regnet-y-040'''
__lowerCAmelCase = '''tabby, tabby cat'''
__lowerCAmelCase = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __a ( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 3 , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 1 , lowerCAmelCase__ = "relu" , ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
        lowercase__: Any = nn.Conv2d(
            lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=lowerCAmelCase__ , stride=lowerCAmelCase__ , padding=kernel_size // 2 , groups=lowerCAmelCase__ , bias=lowerCAmelCase__ , )
        lowercase__: str = nn.BatchNorm2d(lowerCAmelCase__ )
        lowercase__: Union[str, Any] = ACT2FN[activation] if activation is not None else nn.Identity()
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
lowercase__: List[str] = self.convolution(lowerCAmelCase__ )
lowercase__: Optional[Any] = self.normalization(lowerCAmelCase__ )
lowercase__: Union[str, Any] = self.activation(lowerCAmelCase__ )
return hidden_state
class __a ( nn.Module ):
def __init__( self , lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
lowercase__: Dict = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
lowercase__: Dict = config.num_channels
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> int:
'''simple docstring'''
lowercase__: Tuple = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
lowercase__: Optional[int] = self.embedder(lowerCAmelCase__ )
return hidden_state
class __a ( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 2 ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
        lowercase__: Optional[Any] = nn.Conv2d(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , stride=lowerCAmelCase__ , bias=lowerCAmelCase__ )
        lowercase__: Union[str, Any] = nn.BatchNorm2d(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Tensor:
'''simple docstring'''
lowercase__: Any = self.convolution(lowerCAmelCase__ )
lowercase__: str = self.normalization(lowerCAmelCase__ )
return hidden_state
class __a ( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
super().__init__()
        lowercase__: Any = nn.AdaptiveAvgPool2d((1, 1) )
        lowercase__: str = nn.Sequential(
            nn.Conv2d(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 ) , nn.ReLU() , nn.Conv2d(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 ) , nn.Sigmoid() , )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
# b c h w -> b c 1 1
lowercase__: str = self.pooler(lowerCAmelCase__ )
lowercase__: List[str] = self.attention(lowerCAmelCase__ )
lowercase__: List[Any] = hidden_state * attention
return hidden_state
class __a ( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 1 ) -> Dict:
'''simple docstring'''
super().__init__()
lowercase__: str = in_channels != out_channels or stride != 1
lowercase__: Optional[int] = max(1 , out_channels // config.groups_width )
lowercase__: Union[str, Any] = (
RegNetShortCut(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) if should_apply_shortcut else nn.Identity()
)
lowercase__: Dict = nn.Sequential(
RegNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ , groups=lowerCAmelCase__ , activation=config.hidden_act ) , RegNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , activation=lowerCAmelCase__ ) , )
        lowercase__: Tuple = ACT2FN[config.hidden_act]
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> int:
'''simple docstring'''
lowercase__: Dict = hidden_state
lowercase__: Union[str, Any] = self.layer(lowerCAmelCase__ )
lowercase__: int = self.shortcut(lowerCAmelCase__ )
hidden_state += residual
lowercase__: Optional[int] = self.activation(lowerCAmelCase__ )
return hidden_state
class __a ( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 1 ) -> Dict:
'''simple docstring'''
super().__init__()
lowercase__: Optional[int] = in_channels != out_channels or stride != 1
lowercase__: List[str] = max(1 , out_channels // config.groups_width )
lowercase__: Any = (
RegNetShortCut(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) if should_apply_shortcut else nn.Identity()
)
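        # Y layer = X layer with a squeeze-and-excitation block inserted before the final 1x1 projection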
lowercase__: str = nn.Sequential(
RegNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ , groups=lowerCAmelCase__ , activation=config.hidden_act ) , RegNetSELayer(lowerCAmelCase__ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , activation=lowerCAmelCase__ ) , )
        lowercase__: Union[str, Any] = ACT2FN[config.hidden_act]
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
lowercase__: Optional[Any] = hidden_state
lowercase__: Optional[int] = self.layer(lowerCAmelCase__ )
lowercase__: str = self.shortcut(lowerCAmelCase__ )
hidden_state += residual
lowercase__: Optional[int] = self.activation(lowerCAmelCase__ )
return hidden_state
class __a ( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 2 , lowerCAmelCase__ = 2 , ) -> Tuple:
'''simple docstring'''
super().__init__()
lowercase__: Optional[int] = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
lowercase__: str = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ , ) , *[layer(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) for _ in range(depth - 1 )] , )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
lowercase__: str = self.layers(lowerCAmelCase__ )
return hidden_state
class __a ( nn.Module ):
def __init__( self , lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
super().__init__()
lowercase__: int = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowerCAmelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
lowercase__: int = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowerCAmelCase__ , config.depths[1:] ):
self.stages.append(RegNetStage(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , depth=lowerCAmelCase__ ) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = False , lowerCAmelCase__ = True ) -> BaseModelOutputWithNoAttention:
'''simple docstring'''
lowercase__: List[str] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase__: Optional[Any] = hidden_states + (hidden_state,)
lowercase__: List[Any] = stage_module(lowerCAmelCase__ )
if output_hidden_states:
lowercase__: Optional[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowerCAmelCase__ , hidden_states=lowerCAmelCase__ )
class __a ( __UpperCamelCase ):
__lowercase : Dict = RegNetConfig
__lowercase : Dict = 'regnet'
__lowercase : str = 'pixel_values'
__lowercase : List[str] = True
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
        if isinstance(lowerCAmelCase__ , nn.Conv2d ):
            nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
        elif isinstance(lowerCAmelCase__ , (nn.BatchNorm2d, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__=False ) -> Optional[Any]:
'''simple docstring'''
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase__: Any = value
__lowerCAmelCase = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
__lowerCAmelCase = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , __UpperCamelCase , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __a ( __UpperCamelCase ):
def __init__( self , lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
super().__init__(lowerCAmelCase__ )
lowercase__: Tuple = config
lowercase__: List[str] = RegNetEmbeddings(lowerCAmelCase__ )
lowercase__: Optional[int] = RegNetEncoder(lowerCAmelCase__ )
        lowercase__: Optional[Any] = nn.AdaptiveAvgPool2d((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None ) -> BaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
lowercase__: List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__: Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__: Any = self.embedder(lowerCAmelCase__ )
lowercase__: List[Any] = self.encoder(
lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
lowercase__: Optional[Any] = encoder_outputs[0]
lowercase__: Optional[int] = self.pooler(lowerCAmelCase__ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase__ , pooler_output=lowerCAmelCase__ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , __UpperCamelCase , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __a ( __UpperCamelCase ):
def __init__( self , lowerCAmelCase__ ) -> str:
'''simple docstring'''
super().__init__(lowerCAmelCase__ )
lowercase__: Dict = config.num_labels
lowercase__: Dict = RegNetModel(lowerCAmelCase__ )
# classification head
lowercase__: str = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , ) -> ImageClassifierOutputWithNoAttention:
'''simple docstring'''
lowercase__: str = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__: Optional[int] = self.regnet(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
lowercase__: Dict = outputs.pooler_output if return_dict else outputs[1]
lowercase__: List[str] = self.classifier(lowerCAmelCase__ )
lowercase__: Optional[Any] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase__: Dict = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase__: Optional[int] = 'single_label_classification'
else:
lowercase__: Tuple = 'multi_label_classification'
if self.config.problem_type == "regression":
lowercase__: List[Any] = MSELoss()
if self.num_labels == 1:
lowercase__: Optional[int] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowercase__: int = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ )
elif self.config.problem_type == "single_label_classification":
lowercase__: Dict = CrossEntropyLoss()
lowercase__: Optional[int] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase__: List[Any] = BCEWithLogitsLoss()
lowercase__: Any = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ )
if not return_dict:
lowercase__: int = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase__ , logits=lowerCAmelCase__ , hidden_states=outputs.hidden_states )
| 196 | 0 |
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
SAMPLE_BPE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
FRAMEWORK = """pt""" if is_torch_available() else """tf"""
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = """<pad>"""
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(vocab_keys ) , 1004 )

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size , 1005 )

    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname )

        sequence = """I was born in 92000, and this is falsé."""

        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )

        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = """I was born in 92000, and this is falsé."""

        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )

        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"""input_ids""": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        # camembert is a french model. So we also use french texts.
        sequences = [
            """Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
            """utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
            """À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
            """pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
            """telles que la traduction et la synthèse de texte.""",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=sequences , )
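# A standalone sketch of the slow/fast parity check the tests above automate
# (assumes network access to download "camembert-base"); not part of the test file:
#
#     from transformers import CamembertTokenizer, CamembertTokenizerFast
#     slow_tok = CamembertTokenizer.from_pretrained("camembert-base")
#     fast_tok = CamembertTokenizerFast.from_pretrained("camembert-base")
#     text = "I was born in 92000, and this is falsé."
#     assert slow_tok.encode(text) == fast_tok.encode(text)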
| 223 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json""",
},
"""added_tokens.json""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json""",
},
"""merges_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""RUCAIBox/mvp""": 10_24,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" MVP tokenizer (backed by HuggingFace's *tokenizers* library), using byte-level
    Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = MvpTokenizer
    def __init__(
        self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" ,
        eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" ,
        mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type""" ) )
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = """post_processor"""
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["""sep"""] = tuple(state["""sep"""] )
            if "cls" in state:
                state["""cls"""] = tuple(state["""cls"""] )
            changes_to_apply = False
            if state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
                state["""add_prefix_space"""] = add_prefix_space
                changes_to_apply = True
            if state.get("""trim_offsets""" , trim_offsets ) != trim_offsets:
                state["""trim_offsets"""] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("""type""" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("""Using mask_token, but it is not set yet.""" )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def mask_token(self, value):
        # Mask tokens behave like normal words, i.e. include the space before them
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                """to use it with pretokenized inputs.""" )
        return super()._batch_encode_plus(*args , **kwargs )

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                """to use it with pretokenized inputs.""" )
        return super()._encode_plus(*args , **kwargs )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
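# A minimal usage sketch (assumes network access to download "RUCAIBox/mvp");
# single sequences come back wrapped as <s> ... </s>:
#
#     tok = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
#     ids = tok("Summarize: the cat sat on the mat.")["input_ids"]
#     print(tok.convert_ids_to_tokens(ids))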
| 223 | 1 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module ):
    if is_torch_version('''<''' ,'''2.0.0''' ) or not hasattr(torch ,'''_dynamo''' ):
        return False
    return isinstance(module ,torch._dynamo.eval_frame.OptimizedModule )


def extract_model_from_parallel(model ,keep_fp32_wrapper: bool = True ):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model )
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model ,options ):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model ,'''forward''' )
        original_forward = model.__dict__.pop('''_original_forward''' ,None )
        if original_forward is not None:
            while hasattr(forward ,'''__wrapped__''' ):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
    if getattr(model ,'''_converted_to_transformer_engine''' ,False ):
        convert_model(model ,to_transformer_engine=False )

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model


def wait_for_everyone():
    PartialState().wait_for_everyone()


def save(obj ,f ):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj ,f )
    elif PartialState().local_process_index == 0:
        torch.save(obj ,f )


@contextmanager
def patch_environment(**kwargs ):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value )

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj ):
    if not hasattr(obj ,'''__qualname__''' ) and not hasattr(obj ,'''__name__''' ):
        obj = getattr(obj ,'''__class__''' ,obj )
    if hasattr(obj ,'''__qualname__''' ):
        return obj.__qualname__
    if hasattr(obj ,'''__name__''' ):
        return obj.__name__
    return str(obj )


def merge_dicts(source ,destination ):
    for key, value in source.items():
        if isinstance(value ,dict ):
            node = destination.setdefault(key ,{} )
            merge_dicts(value ,node )
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None ):
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET ,socket.SOCK_STREAM ) as s:
        return s.connect_ex(('''localhost''', port) ) == 0
| 103 |
class MaxFenwickTree:
    '''
    Fenwick tree (binary indexed tree) specialised for range-maximum queries
    over a point-updatable array of non-negative values.
    '''

    def __init__( self , size : int ) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next( index : int ) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev( index : int ) -> int:
        return (index & (index + 1)) - 1

    def update( self , index : int , value : int ) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index ) + 1
            if current_left_border == index:
                # the tree node covers only `index` itself
                self.tree[index] = value
            else:
                # the node covers [current_left_border, index]; recompute its maximum
                self.tree[index] = max(value , self.query(current_left_border , index ) )
            index = self.get_next(index )

    def query( self , left : int , right : int ) -> int:
        right -= 1  # Because of right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right )
            if left <= current_left:
                result = max(result , self.tree[right] )
                right = current_left
            else:
                result = max(result , self.arr[right] )
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
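# A minimal usage sketch (hypothetical values):
#
#     ft = MaxFenwickTree(5)
#     ft.update(2, 20)
#     ft.update(4, 10)
#     print(ft.query(0, 5))  # 20 -- maximum over arr[0:5]
#     print(ft.query(3, 5))  # 10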
| 47 | 0 |
def harmonic_series(n_term: str) -> list:
    """Generate the first n terms of the harmonic series (as strings): 1, 1/2, 1/3, ..., 1/n."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term) ):
        series.append(f'1/{temp + 1}' if series else '1' )
    return series
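# Example (a quick sanity check, not from the original file):
#     harmonic_series("4") -> ['1', '1/2', '1/3', '1/4']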
if __name__ == "__main__":
lowerCAmelCase :int = input('''Enter the last number (nth term) of the Harmonic Series''')
print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
print(harmonic_series(nth_term)) | 275 |
'''Client side of a minimal file-transfer example: connects to a local server and saves what it receives.'''
import socket


def main():
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 12312  # must match the port the companion server listens on

    sock.connect((host, port) )
    sock.send(b'Hello server!' )

    with open('Received_file' , 'wb' ) as out_file:
        print('File opened' )
        print('Receiving data...' )
        while True:
            data = sock.recv(1024 )
            if not data:
                break
            out_file.write(data )

    print('Successfully received the file' )
    sock.close()
    print('Connection closed' )


if __name__ == "__main__":
    main()
| 275 | 1 |
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt , class_data_dir , num_class_images ) -> None:
    factor = 1.5
    num_images = int(factor * num_class_images )
    client = ClipClient(
        url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=num_images , aesthetic_weight=0.1 )

    os.makedirs(F"""{class_data_dir}/images""" , exist_ok=True )
    if len(list(Path(F"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt )
        if len(class_images ) >= factor * num_class_images or num_images > 1E4:
            break
        else:
            num_images = int(factor * num_images )
            client = ClipClient(
                url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=num_images , aesthetic_weight=0.1 , )

    count = 0
    total = 0
    pbar = tqdm(desc='''downloading real regularization images''' , total=num_class_images )

    with open(F"""{class_data_dir}/caption.txt""" , '''w''' ) as f1, open(F"""{class_data_dir}/urls.txt""" , '''w''' ) as f2, open(
        F"""{class_data_dir}/images.txt""" , '''w''' ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images['''url'''] )
                if img.status_code == 2_00:
                    _ = Image.open(BytesIO(img.content ) )  # validate that the payload really is an image
                    with open(F"""{class_data_dir}/images/{total}.jpg""" , '''wb''' ) as f:
                        f.write(img.content )
                    f1.write(images['''caption'''] + '''\n''' )
                    f2.write(images['''url'''] + '''\n''' )
                    f3.write(F"""{class_data_dir}/images/{total}.jpg""" + '''\n''' )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser('''''' , add_help=False )
    parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=True , type=str )
    parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=True , type=str )
    parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=2_00 , type=int )
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
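# Example invocation (the script filename is a placeholder; pass your own prompt and output dir):
#
#     python retrieve_class_images.py --class_prompt "a photo of a dog" \
#         --class_data_dir ./class_data --num_class_images 200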
| 181 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator

import datasets


logger = datasets.logging.get_logger(__name__)

_CITATION = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_DESCRIPTION = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos(
    key_lines , sys_lines , NP_only=False , remove_nested=False , keep_singletons=True , min_span=False , doc="dummy_doc" ):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters , singletons_num = reader.get_doc_mentions(doc , key_doc_lines[doc] , keep_singletons )
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters , key_doc_lines[doc] , NP_only , min_span )

    sys_clusters , singletons_num = reader.get_doc_mentions(doc , sys_doc_lines[doc] , keep_singletons )
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters , key_doc_lines[doc] , NP_only , min_span )

    if remove_nested:
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(key_clusters , keep_singletons )
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(sys_clusters , keep_singletons )
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters , key_clusters )
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters , sys_clusters )

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            '''Number of removed nested coreferring mentions in the key '''
            F"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
        logger.info(
            '''Number of resulting singleton clusters in the key '''
            F"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )

    if not keep_singletons:
        logger.info(
            F"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
            '''files, respectively''' )

    return doc_coref_infos
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
UpperCAmelCase__ : str = get_coref_infos(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase__ : Optional[Any] = {}
UpperCAmelCase__ : str = 0
UpperCAmelCase__ : Optional[int] = 0
for name, metric in metrics:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = evaluator.evaluate_documents(lowerCAmelCase__ , lowerCAmelCase__ , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F"""{name}/recall""": recall, F"""{name}/precision""": precision, F"""{name}/f1""": fa} )
logger.info(
name.ljust(10 ) , F"""Recall: {recall * 1_00:.2f}""" , F""" Precision: {precision * 1_00:.2f}""" , F""" F1: {fa * 1_00:.2f}""" , )
if conll_subparts_num == 3:
UpperCAmelCase__ : Any = (conll / 3) * 1_00
logger.info(F"""CoNLL score: {conll:.2f}""" )
output_scores.update({'''conll_score''': conll} )
return output_scores
def a__ ( lowerCAmelCase__ ) -> bool:
UpperCAmelCase__ : int = False
for line in key_lines:
if not line.startswith('''#''' ):
if len(line.split() ) > 6:
UpperCAmelCase__ : str = line.split()[5]
if parse_col != "-":
UpperCAmelCase__ : Tuple = True
break
else:
break
return has_gold_parse
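# Worked example for the column check above, using a line from the docstring sample:
# for "bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - ...", line.split()[5] == "(NP*)",
# which differs from "-", so the key file is treated as carrying gold parse annotation.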
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
def lowercase_ ( self : Any ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
def lowercase_ ( self : Tuple , _A : Union[str, Any] , _A : Tuple , _A : Dict=True , _A : Optional[int]=False , _A : str=False , _A : List[str]=False ):
'''simple docstring'''
UpperCAmelCase__ : Any = [
('''mentions''', evaluator.mentions),
('''muc''', evaluator.muc),
('''bcub''', evaluator.b_cubed),
('''ceafe''', evaluator.ceafe),
('''lea''', evaluator.lea),
]
if min_span:
UpperCAmelCase__ : int = util.check_gold_parse_annotation(_A )
if not has_gold_parse:
raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
UpperCAmelCase__ : List[str] = evaluate(
key_lines=_A , sys_lines=_A , metrics=_A , NP_only=_A , remove_nested=_A , keep_singletons=_A , min_span=_A , )
return score
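# A minimal sketch (helper name is mine) of how `conll_score` above is computed:
# the unweighted mean of the MUC, B-cubed and CEAFe F1 values, scaled to 0-100.
def _conll_score_sketch(muc_f1: float, bcub_f1: float, ceafe_f1: float) -> float:
    return (muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100
# e.g. _conll_score_sketch(1.0, 1.0, 1.0) == 100.0, matching the doctest output above.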
| 181 | 1 |
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
_snake_case = logging.get_logger(__name__)
class UpperCamelCase ( snake_case_ ):
UpperCamelCase : Any = '''vision-encoder-decoder'''
UpperCamelCase : List[Any] = True
def __init__( self : List[Any] , **UpperCAmelCase__ : Optional[int] ) -> Any:
super().__init__(**UpperCAmelCase__ )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f"""A configuraton of type {self.model_type} cannot be instantiated because """
f"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )
_a : List[str] = kwargs.pop("""encoder""" )
_a : Optional[int] = encoder_config.pop("""model_type""" )
_a : Optional[Any] = kwargs.pop("""decoder""" )
_a : List[Any] = decoder_config.pop("""model_type""" )
_a : int = AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__ )
_a : Optional[Any] = AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__ )
_a : Optional[Any] = True
@classmethod
def _lowercase ( cls : str , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , **UpperCAmelCase__ : Optional[Any] ) -> PretrainedConfig:
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
_a : Optional[int] = True
_a : List[str] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCAmelCase__ )
def _lowercase ( self : Optional[Any] ) -> str:
_a : List[str] = copy.deepcopy(self.__dict__ )
_a : Any = self.encoder.to_dict()
_a : Union[str, Any] = self.decoder.to_dict()
_a : Tuple = self.__class__.model_type
return output
class UpperCamelCase ( snake_case_ ):
UpperCamelCase : Optional[int] = version.parse('''1.11''' )
@property
def _lowercase ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _lowercase ( self : Dict ) -> float:
return 1E-4
@property
def _lowercase ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class UpperCamelCase ( snake_case_ ):
@property
def _lowercase ( self : Any ) -> Mapping[str, Mapping[int, str]]:
_a : Optional[int] = OrderedDict()
_a : str = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
_a : Any = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
_a : Union[str, Any] = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : "PreTrainedTokenizerBase" , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
import torch
_a : int = OrderedDict()
_a : Any = super().generate_dummy_inputs(
UpperCAmelCase__ , batch_size=UpperCAmelCase__ , seq_length=UpperCAmelCase__ , is_pair=UpperCAmelCase__ , framework=UpperCAmelCase__ )
_a , _a : List[Any] = dummy_input["""input_ids"""].shape
_a : Tuple = (batch, encoder_sequence, self._config.encoder_hidden_size)
_a : Optional[int] = dummy_input.pop("""input_ids""" )
_a : List[Any] = dummy_input.pop("""attention_mask""" )
_a : Tuple = torch.zeros(UpperCAmelCase__ )
return common_inputs
class UpperCamelCase ( snake_case_ ):
@property
def _lowercase ( self : Optional[int] ) -> None:
pass
def _lowercase ( self : Any , UpperCAmelCase__ : PretrainedConfig ) -> OnnxConfig:
return VisionEncoderDecoderEncoderOnnxConfig(UpperCAmelCase__ )
def _lowercase ( self : int , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : str = "default" ) -> OnnxConfig:
_a : str = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(UpperCAmelCase__ , UpperCAmelCase__ )
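# Hedged usage sketch of the public API this file corresponds to, transformers'
# VisionEncoderDecoderConfig; the encoder/decoder choices are illustrative only.
def _composite_config_demo():
    from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig

    config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())
    # Mirrors the classmethod above: the decoder is switched into cross-attention mode.
    assert config.decoder.is_decoder and config.decoder.add_cross_attention
    return config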
| 324 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
_snake_case = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
},
}
_snake_case = {
'camembert-base': 512,
}
_snake_case = '▁'
class UpperCamelCase ( snake_case_ ):
UpperCamelCase : Any = VOCAB_FILES_NAMES
UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : Dict = ['''input_ids''', '''attention_mask''']
UpperCamelCase : Optional[Any] = CamembertTokenizer
def __init__( self : int , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Optional[Any]="<s>" , UpperCAmelCase__ : Optional[int]="</s>" , UpperCAmelCase__ : Tuple="</s>" , UpperCAmelCase__ : Tuple="<s>" , UpperCAmelCase__ : Tuple="<unk>" , UpperCAmelCase__ : Tuple="<pad>" , UpperCAmelCase__ : int="<mask>" , UpperCAmelCase__ : Optional[int]=["<s>NOTUSED", "</s>NOTUSED"] , **UpperCAmelCase__ : Optional[Any] , ) -> Union[str, Any]:
# Mask token behaves like a normal word, i.e. includes the space before it
_a : List[Any] = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token
super().__init__(
UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__ , )
_a : int = vocab_file
_a : int = False if not self.vocab_file else True
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_a : List[Any] = [self.cls_token_id]
_a : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
_a : Union[str, Any] = [self.sep_token_id]
_a : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : List[str] = os.path.join(
UpperCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ):
copyfile(self.vocab_file , UpperCAmelCase__ )
return (out_vocab_file,)
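# Sketch of the special-token layouts built above (token ids are illustrative,
# not CamemBERT's real ids): single sequence "<s> A </s>", pair "<s> A </s></s> B </s>".
def _camembert_layout_demo(ids_a, ids_b, cls_id=0, sep_id=2):
    cls, sep = [cls_id], [sep_id]
    return cls + ids_a + sep + sep + ids_b + sep
# _camembert_layout_demo([10, 11], [20]) -> [0, 10, 11, 2, 2, 20, 2]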
| 324 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
UpperCAmelCase__ : Dict = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase__ : Dict = {
'vocab_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-german-cased': (
'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'
),
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'
),
},
}
UpperCAmelCase__ : Dict = {
'distilbert-base-uncased': 512,
'distilbert-base-uncased-distilled-squad': 512,
'distilbert-base-cased': 512,
'distilbert-base-cased-distilled-squad': 512,
'distilbert-base-german-cased': 512,
'distilbert-base-multilingual-cased': 512,
}
UpperCAmelCase__ : List[Any] = {
'distilbert-base-uncased': {'do_lower_case': True},
'distilbert-base-uncased-distilled-squad': {'do_lower_case': True},
'distilbert-base-cased': {'do_lower_case': False},
'distilbert-base-cased-distilled-squad': {'do_lower_case': False},
'distilbert-base-german-cased': {'do_lower_case': False},
'distilbert-base-multilingual-cased': {'do_lower_case': False},
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = VOCAB_FILES_NAMES
__UpperCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Tuple = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase : Optional[int] = ['''input_ids''', '''attention_mask''']
__UpperCamelCase : Dict = DistilBertTokenizer
def __init__( self : int , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[Any]="[UNK]" , lowerCAmelCase_ : Dict="[SEP]" , lowerCAmelCase_ : Optional[int]="[PAD]" , lowerCAmelCase_ : Optional[int]="[CLS]" , lowerCAmelCase_ : Tuple="[MASK]" , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : str=None , **lowerCAmelCase_ : List[str] , ):
"""simple docstring"""
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenize_chinese_chars=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ , **lowerCAmelCase_ , )
_A: Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , lowerCAmelCase_ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , lowerCAmelCase_ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , lowerCAmelCase_ ) != tokenize_chinese_chars
):
_A: int = getattr(lowerCAmelCase_ , normalizer_state.pop('''type''' ) )
_A: int = do_lower_case
_A: List[Any] = strip_accents
_A: Optional[int] = tokenize_chinese_chars
_A: Union[str, Any] = normalizer_class(**lowerCAmelCase_ )
_A: Tuple = do_lower_case
def __magic_name__ ( self : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any=None ):
"""simple docstring"""
_A: Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __magic_name__ ( self : Dict , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ):
"""simple docstring"""
_A: Union[str, Any] = [self.sep_token_id]
_A: Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __magic_name__ ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ):
"""simple docstring"""
_A: Tuple = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
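# Worked example of the segment-id layout produced above (helper name and ids are mine):
# with cls + ids_a + sep = 4 tokens and ids_b + sep = 2 tokens, the result is [0, 0, 0, 0, 1, 1].
def _token_type_ids_demo(ids_a, ids_b=None, cls_id=101, sep_id=102):
    cls, sep = [cls_id], [sep_id]
    if ids_b is None:
        return len(cls + ids_a + sep) * [0]
    return len(cls + ids_a + sep) * [0] + len(ids_b + sep) * [1]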
| 121 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''' , [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
] , )
def lowerCamelCase__ ( a , a ) -> Any:
_A: Any = tmp_path_factory.mktemp('''dset_infos_dir''' )
if "full:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
f.write('''---\ndataset_info:\n dataset_size: 42\n---''' )
if "empty:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
f.write('''''' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / '''dataset_infos.json''' , '''w''' ) as f:
f.write('''{"default": {"dataset_size": 42}}''' )
_A: Optional[int] = DatasetInfosDict.from_directory(a )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'''dataset_info''' , [
DatasetInfo(),
DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ),
] , )
def lowerCamelCase__ ( a , a ) -> Any:
_A: int = str(a )
dataset_info.write_to_directory(a )
_A: str = DatasetInfo.from_directory(a )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(a , '''dataset_info.json''' ) )
def lowerCamelCase__ ( ) -> Any:
_A: int = DatasetInfo(
description='''foo''' , citation='''bar''' , homepage='''https://foo.bar''' , license='''CC0''' , features=Features({'''a''': Value('''int32''' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train''', '''num_examples''': 42}] , download_checksums={} , download_size=13_37 , post_processing_size=4_42 , dataset_size=12_34 , size_in_bytes=13_37 + 4_42 + 12_34 , )
_A: Optional[Any] = dataset_info._to_yaml_dict()
assert sorted(a ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
_A: str = yaml.safe_dump(a )
_A: Optional[int] = yaml.safe_load(a )
assert dataset_info_yaml_dict == reloaded
def lowerCamelCase__ ( ) -> int:
_A: Union[str, Any] = DatasetInfo()
_A: Union[str, Any] = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'''dataset_infos_dict''' , [
DatasetInfosDict(),
DatasetInfosDict({'''default''': DatasetInfo()} ),
DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
DatasetInfosDict(
{
'''default''': DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'''v1''': DatasetInfo(dataset_size=42 ),
'''v2''': DatasetInfo(dataset_size=13_37 ),
} ),
] , )
def lowerCamelCase__ ( a , a ) -> Optional[int]:
_A: Optional[int] = str(a )
dataset_infos_dict.write_to_directory(a )
_A: Union[str, Any] = DatasetInfosDict.from_directory(a )
# the config_name of the dataset_infos_dict takes over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
_A: Optional[Any] = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
_A: Any = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(a , '''README.md''' ) )
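# A minimal round-trip sketch of the API exercised above (helper name and field
# values are mine; `tmp_dir` is any writable directory):
def _dataset_info_roundtrip_demo(tmp_dir: str) -> bool:
    info = DatasetInfo(description="demo", dataset_size=42)
    info.write_to_directory(tmp_dir)
    # dataset_info.json serializes the compared fields, so equality should hold.
    return DatasetInfo.from_directory(tmp_dir) == info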
| 121 | 1 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class __lowerCAmelCase ( unittest.TestCase ):
def __init__(self , __magic_name__ , __magic_name__=13 , __magic_name__=7 , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=99 , __magic_name__=32 , __magic_name__=5 , __magic_name__=4 , __magic_name__=37 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=512 , __magic_name__=16 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=4 , ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = parent
snake_case_ : Dict = batch_size
snake_case_ : Union[str, Any] = seq_length
snake_case_ : str = is_training
snake_case_ : str = use_attention_mask
snake_case_ : List[Any] = use_token_type_ids
snake_case_ : Dict = use_labels
snake_case_ : Optional[Any] = vocab_size
snake_case_ : Optional[Any] = hidden_size
snake_case_ : Dict = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : int = hidden_act
snake_case_ : Dict = hidden_dropout_prob
snake_case_ : List[str] = attention_probs_dropout_prob
snake_case_ : List[str] = max_position_embeddings
snake_case_ : List[str] = type_vocab_size
snake_case_ : Optional[int] = type_sequence_label_size
snake_case_ : Tuple = initializer_range
snake_case_ : Union[str, Any] = num_choices
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : List[str] = None
if self.use_attention_mask:
snake_case_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : int = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__magic_name__ , )
return config, input_ids, attention_mask
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ : List[str] = config_and_inputs
snake_case_ : List[str] = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class __lowerCAmelCase ( _a, unittest.TestCase ):
lowerCamelCase_ : List[Any] = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = FlaxDistilBertModelTester(self )
@slow
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case_ : List[str] = model_class_name.from_pretrained('''distilbert-base-uncased''' )
snake_case_ : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__magic_name__ )
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
snake_case_ : Any = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
snake_case_ : List[str] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
snake_case_ : str = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
snake_case_ : Tuple = model(__magic_name__ , attention_mask=__magic_name__ )[0]
snake_case_ : str = (1, 11, 768)
self.assertEqual(output.shape , __magic_name__ )
snake_case_ : List[str] = np.array([[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __magic_name__ , atol=1e-4 ) )
| 279 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCAmelCase_ = TypeVar('''KEY''')
lowerCAmelCase_ = TypeVar('''VAL''')
@dataclass(frozen=_a, slots=_a )
class __lowerCAmelCase ( Generic[KEY, VAL] ):
lowerCamelCase_ : KEY
lowerCamelCase_ : VAL
class __lowerCAmelCase ( _Item ):
def __init__(self ) -> None:
'''simple docstring'''
super().__init__(__magic_name__ , __magic_name__ )
def __bool__(self ) -> bool:
'''simple docstring'''
return False
lowerCAmelCase_ = _DeletedItem()
class __lowerCAmelCase ( MutableMapping[KEY, VAL] ):
def __init__(self , __magic_name__ = 8 , __magic_name__ = 0.75 ) -> None:
'''simple docstring'''
snake_case_ : List[Any] = initial_block_size
snake_case_ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
snake_case_ : List[str] = capacity_factor
snake_case_ : int = 0
def lowerCamelCase (self , __magic_name__ ) -> int:
'''simple docstring'''
return hash(__magic_name__ ) % len(self._buckets )
def lowerCamelCase (self , __magic_name__ ) -> int:
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> bool:
'''simple docstring'''
snake_case_ : Optional[int] = self._buckets[ind]
if not stored:
snake_case_ : Optional[Any] = _Item(__magic_name__ , __magic_name__ )
self._len += 1
return True
elif stored.key == key:
snake_case_ : List[Any] = _Item(__magic_name__ , __magic_name__ )
return True
else:
return False
def lowerCamelCase (self ) -> bool:
'''simple docstring'''
snake_case_ : Any = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(__magic_name__ )
def lowerCamelCase (self ) -> bool:
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
snake_case_ : int = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def lowerCamelCase (self , __magic_name__ ) -> None:
'''simple docstring'''
snake_case_ : List[str] = self._buckets
snake_case_ : int = [None] * new_size
snake_case_ : Optional[int] = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def lowerCamelCase (self ) -> None:
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def lowerCamelCase (self ) -> None:
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def lowerCamelCase (self , __magic_name__ ) -> Iterator[int]:
'''simple docstring'''
snake_case_ : Dict = self._get_bucket_index(__magic_name__ )
for _ in range(len(self._buckets ) ):
yield ind
snake_case_ : Tuple = self._get_next_ind(__magic_name__ )
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> None:
'''simple docstring'''
for ind in self._iterate_buckets(__magic_name__ ):
if self._try_set(__magic_name__ , __magic_name__ , __magic_name__ ):
break
def __setitem__(self , __magic_name__ , __magic_name__ ) -> None:
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(__magic_name__ , __magic_name__ )
def __delitem__(self , __magic_name__ ) -> None:
'''simple docstring'''
for ind in self._iterate_buckets(__magic_name__ ):
snake_case_ : Optional[Any] = self._buckets[ind]
if item is None:
raise KeyError(__magic_name__ )
if item is _deleted:
continue
if item.key == key:
snake_case_ : Union[str, Any] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__(self , __magic_name__ ) -> VAL:
'''simple docstring'''
for ind in self._iterate_buckets(__magic_name__ ):
snake_case_ : List[Any] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(__magic_name__ )
def __len__(self ) -> int:
'''simple docstring'''
return self._len
def __iter__(self ) -> Iterator[KEY]:
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__(self ) -> str:
'''simple docstring'''
snake_case_ : List[str] = ''' ,'''.join(
F'''{item.key}: {item.val}''' for item in self._buckets if item )
return F'''HashMap({val_string})'''
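# Self-contained sketch (names are mine) of the linear-probing scheme the class above
# uses: start at hash(key) % size and step by one, wrapping around, until a slot matches.
def _probe_sequence(key, num_buckets):
    ind = hash(key) % num_buckets
    for _ in range(num_buckets):
        yield ind
        ind = (ind + 1) % num_buckets
# e.g. with num_buckets=8 and hash(key) % 8 == 6, the probe order is 6, 7, 0, 1, ...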
| 279 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase_ ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCamelCase ( self ) -> List[str]:
torch.manual_seed(0 )
__lowercase : List[Any] = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def _lowerCamelCase ( self ) -> Dict:
torch.manual_seed(0 )
__lowercase : Optional[int] = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
def _lowerCamelCase ( self ) -> int:
torch.manual_seed(0 )
__lowercase : Tuple = AutoencoderKL(
sample_size=(1_28, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
__lowercase : Optional[Any] = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def _lowerCamelCase ( self ) -> Tuple:
__lowercase : str = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowercase : Any = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
__lowercase : List[str] = DDPMScheduler()
__lowercase : Tuple = AudioDiffusionPipeline(vqvae=_SCREAMING_SNAKE_CASE , unet=self.dummy_unet , mel=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
__lowercase : Optional[Any] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowercase : Any = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(42 )
__lowercase : Union[str, Any] = pipe(generator=_SCREAMING_SNAKE_CASE , steps=4 )
__lowercase : Dict = output.audios[0]
__lowercase : List[str] = output.images[0]
__lowercase : Union[str, Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(42 )
__lowercase : Union[str, Any] = pipe(generator=_SCREAMING_SNAKE_CASE , steps=4 , return_dict=_SCREAMING_SNAKE_CASE )
__lowercase : Union[str, Any] = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
__lowercase : Tuple = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
__lowercase : Optional[Any] = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
__lowercase : int = np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
__lowercase : Optional[int] = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
__lowercase : Dict = DDIMScheduler()
__lowercase : Any = self.dummy_vqvae_and_unet
__lowercase : List[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
__lowercase : Dict = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
np.random.seed(0 )
__lowercase : Tuple = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
__lowercase : Optional[Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(42 )
__lowercase : Optional[int] = pipe(raw_audio=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , start_step=5 , steps=10 )
__lowercase : Optional[int] = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
__lowercase : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
__lowercase : Tuple = np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
__lowercase : Dict = self.dummy_unet_condition
__lowercase : str = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=_SCREAMING_SNAKE_CASE , mel=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
__lowercase : Optional[Any] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
np.random.seed(0 )
__lowercase : List[str] = torch.rand((1, 1, 10) )
__lowercase : List[Any] = pipe(generator=_SCREAMING_SNAKE_CASE , encoding=_SCREAMING_SNAKE_CASE )
__lowercase : Union[str, Any] = output.images[0]
__lowercase : int = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
__lowercase : Optional[int] = np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ) -> List[str]:
__lowercase : Optional[int] = torch_device
__lowercase : List[Any] = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
__lowercase : Dict = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowercase : str = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(42 )
__lowercase : Optional[int] = pipe(generator=_SCREAMING_SNAKE_CASE )
__lowercase : Dict = output.audios[0]
__lowercase : List[str] = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
__lowercase : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
__lowercase : Dict = np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
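# Worked check of the length invariant asserted above: with the dummy UNet's
# sample_size == (32, 64) and Mel's default hop_length of 512 (an assumption about
# the Mel defaults), the audio has (64 - 1) * 512 == 32_256 samples,
# i.e. audio.shape == (1, 32_256).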
| 249 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
lowerCamelCase = logging.getLogger(__name__)
class _a ( _lowercase):
_a : Dict = '''summarization'''
_a : int = ['''loss''']
_a : Optional[Any] = ROUGE_KEYS
_a : Any = '''rouge2'''
def __init__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str] , **_SCREAMING_SNAKE_CASE : Dict )-> int:
if hparams.sortish_sampler and hparams.gpus > 1:
lowerCAmelCase__ : int = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' )
if hparams.sortish_sampler:
raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' )
super().__init__(_SCREAMING_SNAKE_CASE , num_labels=_SCREAMING_SNAKE_CASE , mode=self.mode , **_SCREAMING_SNAKE_CASE )
use_task_specific_params(self.model , '''summarization''' )
save_git_info(self.hparams.output_dir )
lowerCAmelCase__ : List[Any] = Path(self.output_dir ) / '''metrics.json'''
lowerCAmelCase__ : int = Path(self.output_dir ) / '''hparams.pkl'''
pickle_save(self.hparams , self.hparams_save_path )
lowerCAmelCase__ : Tuple = 0
lowerCAmelCase__ : List[Any] = defaultdict(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Any = self.config.model_type
lowerCAmelCase__ : Any = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
lowerCAmelCase__ : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
lowerCAmelCase__ : List[Any] = {
'''train''': self.hparams.n_train,
'''val''': self.hparams.n_val,
'''test''': self.hparams.n_test,
}
lowerCAmelCase__ : Union[str, Any] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
lowerCAmelCase__ : Tuple = {
'''train''': self.hparams.max_target_length,
'''val''': self.hparams.val_max_target_length,
'''test''': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'target_lens: {self.target_lens}'
assert self.target_lens["train"] <= self.target_lens["test"], F'target_lens: {self.target_lens}'
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
lowerCAmelCase__ : List[str] = get_git_info()['''repo_sha''']
lowerCAmelCase__ : int = hparams.num_workers
lowerCAmelCase__ : Optional[int] = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , _SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ : Optional[int] = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
lowerCAmelCase__ : List[Any] = self.decoder_start_token_id
lowerCAmelCase__ : int = (
SeqaSeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''' ) else LegacySeqaSeqDataset
)
lowerCAmelCase__ : Union[str, Any] = False
lowerCAmelCase__ : List[str] = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
lowerCAmelCase__ : int = self.hparams.eval_max_gen_length
else:
lowerCAmelCase__ : Tuple = self.model.config.max_length
lowerCAmelCase__ : List[str] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def UpperCAmelCase__( self : Dict , _SCREAMING_SNAKE_CASE : Dict[str, torch.Tensor] )-> Dict[str, List[str]]:
lowerCAmelCase__ : Any = {
k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items()
}
save_json(_SCREAMING_SNAKE_CASE , Path(self.output_dir ) / '''text_batch.json''' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / '''tok_batch.json''' )
lowerCAmelCase__ : Union[str, Any] = True
return readable_batch
def UpperCAmelCase__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : Tuple , **_SCREAMING_SNAKE_CASE : Tuple )-> Tuple:
return self.model(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Optional[int] , _SCREAMING_SNAKE_CASE : List[int] )-> Tuple:
lowerCAmelCase__ : int = self.tokenizer.batch_decode(
_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE )
return lmap(str.strip , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : dict )-> Tuple:
lowerCAmelCase__ : Any = self.tokenizer.pad_token_id
lowerCAmelCase__ , lowerCAmelCase__ : int = batch['''input_ids'''], batch['''attention_mask''']
lowerCAmelCase__ : Optional[Any] = batch['''labels''']
if isinstance(self.model , _SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ : List[str] = self.model._shift_right(_SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase__ : Any = shift_tokens_right(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
lowerCAmelCase__ : Any = decoder_input_ids
self.save_readable_batch(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Tuple = self(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , decoder_input_ids=_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : List[str] = outputs['''logits''']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
lowerCAmelCase__ : Optional[int] = nn.CrossEntropyLoss(ignore_index=_SCREAMING_SNAKE_CASE )
assert lm_logits.shape[-1] == self.vocab_size
lowerCAmelCase__ : Optional[Any] = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
lowerCAmelCase__ : Union[str, Any] = nn.functional.log_softmax(_SCREAMING_SNAKE_CASE , dim=-1 )
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = label_smoothed_nll_loss(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.hparams.label_smoothing , ignore_index=_SCREAMING_SNAKE_CASE )
return (loss,)
@property
def UpperCAmelCase__( self : Optional[Any] )-> int:
return self.tokenizer.pad_token_id
def UpperCAmelCase__( self : Dict , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[str] )-> Dict:
lowerCAmelCase__ : List[Any] = self._step(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : int = dict(zip(self.loss_names , _SCREAMING_SNAKE_CASE ) )
# tokens per batch
lowerCAmelCase__ : Dict = batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum()
lowerCAmelCase__ : Optional[int] = batch['''input_ids'''].shape[0]
lowerCAmelCase__ : List[Any] = batch['''input_ids'''].eq(self.pad ).sum()
lowerCAmelCase__ : List[str] = batch['''input_ids'''].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def UpperCAmelCase__( self : Tuple , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] )-> Dict:
return self._generative_step(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : List[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Tuple="val" )-> Dict:
self.step_count += 1
lowerCAmelCase__ : Any = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
lowerCAmelCase__ : Union[str, Any] = losses['''loss''']
lowerCAmelCase__ : Union[str, Any] = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
}
lowerCAmelCase__ : Optional[Any] = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
lowerCAmelCase__ : torch.FloatTensor = torch.tensor(_SCREAMING_SNAKE_CASE ).type_as(_SCREAMING_SNAKE_CASE )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Dict = {F'{prefix}_avg_{k}': x for k, x in losses.items()}
lowerCAmelCase__ : Optional[Any] = self.step_count
self.metrics[prefix].append(_SCREAMING_SNAKE_CASE ) # callback writes this to self.metrics_save_path
lowerCAmelCase__ : Optional[int] = flatten_list([x['''preds'''] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'{prefix}_loss': loss,
F'{prefix}_{self.val_metric}': metric_tensor,
}
def UpperCAmelCase__( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[Any] )-> Dict:
return calculate_rouge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Tuple , _SCREAMING_SNAKE_CASE : dict )-> dict:
lowerCAmelCase__ : Any = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
lowerCAmelCase__ : str = self.model.generate(
batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=_SCREAMING_SNAKE_CASE , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
lowerCAmelCase__ : List[str] = (time.time() - ta) / batch['''input_ids'''].shape[0]
lowerCAmelCase__ : List[str] = self.ids_to_clean_text(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : List[str] = self.ids_to_clean_text(batch['''labels'''] )
lowerCAmelCase__ : Any = self._step(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : int = dict(zip(self.loss_names , _SCREAMING_SNAKE_CASE ) )
lowerCAmelCase__ : Dict = self.calc_generative_metrics(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : str = np.mean(lmap(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
base_metrics.update(gen_time=_SCREAMING_SNAKE_CASE , gen_len=_SCREAMING_SNAKE_CASE , preds=_SCREAMING_SNAKE_CASE , target=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
return base_metrics
def UpperCAmelCase__( self : Any , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[Any] )-> Optional[int]:
return self._generative_step(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Tuple , _SCREAMING_SNAKE_CASE : int )-> str:
return self.validation_epoch_end(_SCREAMING_SNAKE_CASE , prefix='''test''' )
def UpperCAmelCase__( self : Dict , _SCREAMING_SNAKE_CASE : int )-> SeqaSeqDataset:
lowerCAmelCase__ : Dict = self.n_obs[type_path]
lowerCAmelCase__ : Dict = self.target_lens[type_path]
lowerCAmelCase__ : int = self.dataset_class(
self.tokenizer , type_path=_SCREAMING_SNAKE_CASE , n_obs=_SCREAMING_SNAKE_CASE , max_target_length=_SCREAMING_SNAKE_CASE , **self.dataset_kwargs , )
return dataset
def UpperCAmelCase__( self : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : bool = False )-> DataLoader:
lowerCAmelCase__ : str = self.get_dataset(_SCREAMING_SNAKE_CASE )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
lowerCAmelCase__ : Any = dataset.make_sortish_sampler(_SCREAMING_SNAKE_CASE , distributed=self.hparams.gpus > 1 )
return DataLoader(
_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , collate_fn=dataset.collate_fn , shuffle=_SCREAMING_SNAKE_CASE , num_workers=self.num_workers , sampler=_SCREAMING_SNAKE_CASE , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
lowerCAmelCase__ : Optional[int] = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
_SCREAMING_SNAKE_CASE , batch_sampler=_SCREAMING_SNAKE_CASE , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , collate_fn=dataset.collate_fn , shuffle=_SCREAMING_SNAKE_CASE , num_workers=self.num_workers , sampler=_SCREAMING_SNAKE_CASE , )
def UpperCAmelCase__( self : List[str] )-> DataLoader:
lowerCAmelCase__ : Dict = self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=_SCREAMING_SNAKE_CASE )
return dataloader
def UpperCAmelCase__( self : List[str] )-> DataLoader:
return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size )
def UpperCAmelCase__( self : List[Any] )-> DataLoader:
return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def UpperCAmelCase__( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str )-> List[str]:
BaseTransformer.add_model_specific_args(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
add_generic_args(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
parser.add_argument(
'''--max_source_length''' , default=1024 , type=_SCREAMING_SNAKE_CASE , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--max_target_length''' , default=56 , type=_SCREAMING_SNAKE_CASE , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--val_max_target_length''' , default=142 , type=_SCREAMING_SNAKE_CASE , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--test_max_target_length''' , default=142 , type=_SCREAMING_SNAKE_CASE , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument('''--freeze_encoder''' , action='''store_true''' )
parser.add_argument('''--freeze_embeds''' , action='''store_true''' )
parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=_SCREAMING_SNAKE_CASE )
parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=_SCREAMING_SNAKE_CASE )
parser.add_argument('''--max_tokens_per_batch''' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE )
parser.add_argument('''--logger_name''' , type=_SCREAMING_SNAKE_CASE , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''' )
parser.add_argument('''--n_train''' , type=_SCREAMING_SNAKE_CASE , default=-1 , required=_SCREAMING_SNAKE_CASE , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_val''' , type=_SCREAMING_SNAKE_CASE , default=500 , required=_SCREAMING_SNAKE_CASE , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_test''' , type=_SCREAMING_SNAKE_CASE , default=-1 , required=_SCREAMING_SNAKE_CASE , help='''# examples. -1 means use all.''' )
parser.add_argument(
'''--task''' , type=_SCREAMING_SNAKE_CASE , default='''summarization''' , required=_SCREAMING_SNAKE_CASE , help='''Task to run: summarization or translation.''' )
parser.add_argument('''--label_smoothing''' , type=_SCREAMING_SNAKE_CASE , default=0.0 , required=_SCREAMING_SNAKE_CASE )
parser.add_argument('''--src_lang''' , type=_SCREAMING_SNAKE_CASE , default='''''' , required=_SCREAMING_SNAKE_CASE )
parser.add_argument('''--tgt_lang''' , type=_SCREAMING_SNAKE_CASE , default='''''' , required=_SCREAMING_SNAKE_CASE )
parser.add_argument('''--eval_beams''' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE )
parser.add_argument(
'''--val_metric''' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , choices=['''bleu''', '''rouge2''', '''loss''', None] )
parser.add_argument('''--eval_max_gen_length''' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help='''never generate more than n tokens''' )
parser.add_argument('''--save_top_k''' , type=_SCREAMING_SNAKE_CASE , default=1 , required=_SCREAMING_SNAKE_CASE , help='''How many checkpoints to save''' )
parser.add_argument(
'''--early_stopping_patience''' , type=_SCREAMING_SNAKE_CASE , default=-1 , required=_SCREAMING_SNAKE_CASE , help=(
'''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'''
''' val_check_interval will affect it.'''
) , )
return parser
class _a ( _lowercase):
_a : Tuple = '''translation'''
_a : Union[str, Any] = ['''loss''']
_a : int = ['''bleu''']
_a : List[str] = '''bleu'''
def __init__( self : Optional[int] , _SCREAMING_SNAKE_CASE : int , **_SCREAMING_SNAKE_CASE : Any )-> Optional[int]:
super().__init__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Dict = hparams.src_lang
lowerCAmelCase__ : str = hparams.tgt_lang
def UpperCAmelCase__( self : int , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Tuple )-> dict:
return calculate_bleu(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    main(args)
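# Illustrative invocation only (the script/file name and the data layout are
# assumptions, not taken from this file; the flags themselves are defined above):
#
#     python finetune.py --data_dir wmt_en_ro --task translation \
#         --src_lang en --tgt_lang ro --output_dir /tmp/mt_run --do_predict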
| 131 | 0 |
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative when deriv=True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after the forward propagation training."""

    # Random starting weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
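# A rough convergence sketch (values assumed for illustration, and training is
# stochastic): with enough propagations the result should land near `expected`.
#
#     result = forward_propagation(32, 450_000)
#     assert 31 < result < 33  # usually holds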
| 356 |
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


if __name__ == "__main__":
    main()
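# Hypothetical round-trip check (assumes the key files written above): RSA
# guarantees pow(pow(m, e, n), d, n) == m for any message m < n.
#
#     _, n, e = map(int, open("rsa_pubkey.txt").read().split(","))
#     _, _, d = map(int, open("rsa_privkey.txt").read().split(","))
#     assert pow(pow(42, e, n), d, n) == 42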
| 117 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
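        # Note on the assertion above: `model(...).loss` holds the token-level
        # cross-entropy, which the test averages and negates before pinning it
        # against a previously recorded reference with a 2e-4 absolute tolerance.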
| 158 |
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """Pair up leftover "--key value" tokens into a dict of keyword arguments."""
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown)

    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
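# Illustrative behavior of parse_unknown_args (values made up): leftover
# "--flag value" pairs become string-valued dict entries.
#
#     >>> parse_unknown_args(["--num_proc", "4", "--cache_dir", "/tmp"])
#     {'num_proc': '4', 'cache_dir': '/tmp'}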
| 158 | 1 |
"""simple docstring"""
def or_gate(input_1: int, input_2: int) -> int:
    """Return 1 if at least one input is 1, else 0 (logical OR)."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 351 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer


MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        _ = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
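# Note on the hash_funcs above: Streamlit cannot hash torch tensors or
# tokenizer objects, so mapping them to `lambda _: None` tells st.cache to
# skip them when computing the cache key for this function.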
st.title('Long Form Question Answering with ELI5')
# Start sidebar
header_html = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
header_full = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True

retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
question_s = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
        answers_st = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
disclaimer = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 301 | 0 |
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by inspecting the low bit and shifting right."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Benchmark both implementations on a few inputs."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
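# Worked example: 25 is 0b11001, so both implementations return 3, which is
# also the value the benchmark above prints for number = 25.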
| 94 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    """Map an image size to the matching latent size for the given VQ scale factor."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
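# Quick check of the latent-size helper above (scale factor 8, as computed in
# __init__ for the usual movq configuration; values illustrative):
#
#     >>> downscale_height_and_width(768, 768, scale_factor=8)
#     (96, 96)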
| 94 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_a : List[str]= False
class UpperCamelCase ( unittest.TestCase ):
def _lowercase (self : List[str]) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowercase (self : Optional[Any]) -> Union[str, Any]:
return 12
@property
def _lowercase (self : Dict) -> Union[str, Any]:
return 12
@property
def _lowercase (self : int) -> Tuple:
return 32
@property
def _lowercase (self : Optional[int]) -> Dict:
torch.manual_seed(0)
__snake_case : Any = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def _lowercase (self : List[Any]) -> Optional[int]:
__snake_case : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def _lowercase (self : Union[str, Any]) -> Optional[int]:
torch.manual_seed(0)
__snake_case : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(_A)
@property
def _lowercase (self : Union[str, Any]) -> Dict:
torch.manual_seed(0)
__snake_case : Any = 12
__snake_case : int = 12
__snake_case : List[Any] = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
__snake_case : Union[str, Any] = TransformeraDModel(**_A)
return model
def _lowercase (self : Union[str, Any]) -> Dict:
__snake_case : Tuple = 'cpu'
__snake_case : List[str] = self.dummy_vqvae
__snake_case : str = self.dummy_text_encoder
__snake_case : Optional[Any] = self.dummy_tokenizer
__snake_case : Dict = self.dummy_transformer
__snake_case : Optional[int] = VQDiffusionScheduler(self.num_embed)
__snake_case : List[Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=_A)
__snake_case : List[Any] = VQDiffusionPipeline(
vqvae=_A , text_encoder=_A , tokenizer=_A , transformer=_A , scheduler=_A , learned_classifier_free_sampling_embeddings=_A , )
__snake_case : List[Any] = pipe.to(_A)
pipe.set_progress_bar_config(disable=_A)
__snake_case : Optional[Any] = 'teddy bear playing in the pool'
__snake_case : str = torch.Generator(device=_A).manual_seed(0)
__snake_case : Union[str, Any] = pipe([prompt] , generator=_A , num_inference_steps=2 , output_type='np')
__snake_case : Optional[int] = output.images
__snake_case : int = torch.Generator(device=_A).manual_seed(0)
__snake_case : Tuple = pipe(
[prompt] , generator=_A , output_type='np' , return_dict=_A , num_inference_steps=2)[0]
__snake_case : str = image[0, -3:, -3:, -1]
__snake_case : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__snake_case : str = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
def _lowercase (self : Tuple) -> Optional[int]:
__snake_case : Optional[Any] = 'cpu'
__snake_case : Optional[int] = self.dummy_vqvae
__snake_case : List[str] = self.dummy_text_encoder
__snake_case : Optional[int] = self.dummy_tokenizer
__snake_case : Optional[Any] = self.dummy_transformer
__snake_case : Union[str, Any] = VQDiffusionScheduler(self.num_embed)
__snake_case : Optional[int] = LearnedClassifierFreeSamplingEmbeddings(
learnable=_A , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length)
__snake_case : Union[str, Any] = VQDiffusionPipeline(
vqvae=_A , text_encoder=_A , tokenizer=_A , transformer=_A , scheduler=_A , learned_classifier_free_sampling_embeddings=_A , )
__snake_case : Union[str, Any] = pipe.to(_A)
pipe.set_progress_bar_config(disable=_A)
__snake_case : Union[str, Any] = 'teddy bear playing in the pool'
__snake_case : Optional[int] = torch.Generator(device=_A).manual_seed(0)
__snake_case : Tuple = pipe([prompt] , generator=_A , num_inference_steps=2 , output_type='np')
__snake_case : Optional[Any] = output.images
__snake_case : str = torch.Generator(device=_A).manual_seed(0)
__snake_case : Dict = pipe(
[prompt] , generator=_A , output_type='np' , return_dict=_A , num_inference_steps=2)[0]
__snake_case : int = image[0, -3:, -3:, -1]
__snake_case : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__snake_case : Optional[Any] = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def _lowercase (self : Any) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self : Tuple) -> Optional[int]:
__snake_case : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy')
__snake_case : Union[str, Any] = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq')
__snake_case : Tuple = pipeline.to(_A)
pipeline.set_progress_bar_config(disable=_A)
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
__snake_case : Optional[int] = torch.Generator(device=_A).manual_seed(0)
__snake_case : int = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=_A , output_type='np' , )
__snake_case : int = output.images[0]
assert image.shape == (2_56, 2_56, 3)
assert np.abs(expected_image - image).max() < 2.0
| 95 | """simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to `precision` digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi are: {pi(n)}")
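# Context for the constants above: each Chudnovsky series term contributes
# roughly 14 decimal digits, hence ceil(precision / 14) iterations; the
# 426880*sqrt(10005) constant and the 13591409 / 545140134 / -262537412640768000
# terms come straight from the Chudnovsky formula for 1/pi.
# For example, pi(10) returns '3.14159265'.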
| 95 | 1 |
"""simple docstring"""
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 69 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """Output of the scheduler's step function."""

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """Stochastic sampling from Karras et al., tailored to variance-expanding models."""

    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
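# Sketch of how a sampler drives this scheduler (`model` is hypothetical; see
# the KarrasVePipeline in diffusers for the canonical loop):
#
#     scheduler.set_timesteps(num_inference_steps)
#     sample = randn_tensor(shape) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         sigma = scheduler.schedule[t]
#         sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#         sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#         model_output = model(sample_hat, sigma_hat)
#         sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample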
| 319 | 0 |
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights. Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
_CITATION = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
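        # float() only applies when sklearn returns a scalar; with average=None
        # the per-class scores come back as a numpy array and pass through as-is.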
| 138 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
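# Quick sanity check against the defaults above (illustrative only):
#
#     config = BlenderbotSmallConfig()
#     assert config.d_model == 512 and config.encoder_layers == 8
#     assert config.num_attention_heads == 16  # resolved via attribute_map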
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
def __magic_name__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
__magic_name__ : Optional[int] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
__magic_name__ : List[Any] = {0: """batch"""}
__magic_name__ : Dict = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
__magic_name__ : List[str] = {0: """batch""", 1: """decoder_sequence"""}
__magic_name__ : Any = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase__ , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__magic_name__ : Union[str, Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
__magic_name__ ,__magic_name__ : Dict = self.num_layers
for i in range(lowerCAmelCase__ ):
__magic_name__ : Dict = {0: """batch""", 2: """past_sequence + sequence"""}
__magic_name__ : int = {0: """batch""", 2: """past_sequence + sequence"""}
else:
__magic_name__ : Union[str, Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def __magic_name__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
__magic_name__ : Any = super().outputs
else:
__magic_name__ : int = super(lowerCAmelCase__ , self ).outputs
if self.use_past:
__magic_name__ ,__magic_name__ : str = self.num_layers
for i in range(lowerCAmelCase__ ):
__magic_name__ : Tuple = {0: """batch""", 2: """past_sequence + sequence"""}
__magic_name__ : Dict = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = -1 , lowerCAmelCase__ = -1 , lowerCAmelCase__ = False , lowerCAmelCase__ = None , ) -> Mapping[str, Any]:
__magic_name__ : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Generate decoder inputs
__magic_name__ : Optional[int] = seq_length if not self.use_past else 1
__magic_name__ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : Optional[Any] = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
__magic_name__ : Dict = dict(**lowerCAmelCase__ , **lowerCAmelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__magic_name__ ,__magic_name__ : List[Any] = common_inputs["""input_ids"""].shape
__magic_name__ : Optional[Any] = common_inputs["""decoder_input_ids"""].shape[1]
__magic_name__ ,__magic_name__ : str = self.num_attention_heads
__magic_name__ : Optional[Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__magic_name__ : Any = decoder_seq_length + 3
__magic_name__ : Dict = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__magic_name__ : List[str] = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(lowerCAmelCase__ , lowerCAmelCase__ )] , dim=1 )
__magic_name__ : Union[str, Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__magic_name__ ,__magic_name__ : List[str] = self.num_layers
__magic_name__ : Optional[Any] = min(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : List[str] = max(lowerCAmelCase__ , lowerCAmelCase__ ) - min_num_layers
__magic_name__ : Tuple = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(lowerCAmelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCAmelCase__ ),
torch.zeros(lowerCAmelCase__ ),
torch.zeros(lowerCAmelCase__ ),
torch.zeros(lowerCAmelCase__ ),
) )
# TODO: test this.
__magic_name__ : Union[str, Any] = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(lowerCAmelCase__ , lowerCAmelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase__ ), torch.zeros(lowerCAmelCase__ )) )
return common_inputs
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = -1 , lowerCAmelCase__ = -1 , lowerCAmelCase__ = False , lowerCAmelCase__ = None , ) -> Mapping[str, Any]:
__magic_name__ : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__magic_name__ ,__magic_name__ : Tuple = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__magic_name__ : List[Any] = seqlen + 2
__magic_name__ ,__magic_name__ : Any = self.num_layers
__magic_name__ ,__magic_name__ : int = self.num_attention_heads
__magic_name__ : Optional[int] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__magic_name__ : Optional[int] = common_inputs["""attention_mask"""].dtype
__magic_name__ : Optional[Any] = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(lowerCAmelCase__ , lowerCAmelCase__ , dtype=lowerCAmelCase__ )] , dim=1 )
__magic_name__ : Tuple = [
(torch.zeros(lowerCAmelCase__ ), torch.zeros(lowerCAmelCase__ )) for _ in range(lowerCAmelCase__ )
]
return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
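For reference, here is a minimal sketch of how compute_effective_axis_dimension is assumed to behave (replace a dynamic axis of -1 with a fixed tracing dimension, then reserve room for special tokens); treat it as an illustration rather than the exact library implementation:

def compute_effective_axis_dimension(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int:
    # A dynamic axis is encoded as -1 (non-positive): trace with the fixed fallback instead.
    if dimension <= 0:
        dimension = fixed_dimension
    # Leave room for the special tokens the tokenizer will add around the dummy tokens.
    dimension -= num_token_to_add
    return dimension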
    def generate_dummy_inputs(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            # The parent class name is assumed from the usual transformers pattern for seq2seq ONNX configs.
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
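For orientation, a hedged sketch of how one per-layer past_key_values tuple is typically flattened into named ONNX tensors in the seq2seq branch (naming assumed from the usual OnnxSeq2SeqConfigWithPast behavior):

def flatten_seq2seq_past(flattened_output: dict, name: str, idx: int, t: tuple) -> None:
    # t holds (decoder_key, decoder_value, encoder_key, encoder_value) for one layer
    flattened_output[f"{name}.{idx}.decoder.key"] = t[0]
    flattened_output[f"{name}.{idx}.decoder.value"] = t[1]
    flattened_output[f"{name}.{idx}.encoder.key"] = t[2]
    flattened_output[f"{name}.{idx}.encoder.value"] = t[3]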
| 138 | 1 |
'''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _lowercase :
'''simple docstring'''
pass
| 229 |
from ..utils import DummyObject, requires_backends
class TorchScipyDummyObject(metaclass=DummyObject):
    """Placeholder raising a helpful error when torch/scipy are missing (concrete class name assumed)."""

    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
| 299 | 0 |
def decimal_to_fraction(decimal) -> tuple:
    """Return the reduced (numerator, denominator) pair for a decimal given as a number or string."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Reduce the fraction with Euclid's algorithm for the greatest common divisor.
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(f'''{decimal_to_fraction(2) = }''')
print(f'''{decimal_to_fraction(89.0) = }''')
print(f'''{decimal_to_fraction("67") = }''')
print(f'''{decimal_to_fraction("45.0") = }''')
print(f'''{decimal_to_fraction(1.5) = }''')
print(f'''{decimal_to_fraction("6.25") = }''')
print(f'''{decimal_to_fraction("78td") = }''') | 357 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force(force: float, area: float, distance: float) -> dict:
    """Solve F = (ℏ c π² A) / (240 d⁴) for whichever argument is passed as 0."""
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
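A hedged usage sketch with illustrative values: two plates of 1 cm^2 separated by 1 µm, solving for the force by passing it as 0 (expected magnitude roughly 1.3e-7 N):

example = casimir_force(force=0, area=1e-4, distance=1e-6)
print(example)  # {'force': ...}, about 1.3e-7 N for these inputs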
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 300 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    # Pairwise squared distances via ||a||^2 - 2 a·b + ||b||^2, computed with one matrix multiply.
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    # Map each RGB pixel to the index of its nearest cluster center.
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
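The distance helper relies on the identity ||a - b||^2 = ||a||^2 - 2 a·b + ||b||^2; a quick illustrative self-check against the direct computation (shapes chosen arbitrarily):

a = np.random.rand(5, 3)
clusters = np.random.rand(4, 3)
direct = ((a[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
assert np.allclose(squared_euclidean_distance(a, clusters), direct)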
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        clusters=None,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_normalize=True,
        do_color_quantize=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )
    def normalize(self, image, data_format=None):
        # Rescale pixels to [0, 2] and shift to [-1, 1], the range the color clusters live in.
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image
    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_normalize=None,
        do_color_quantize=None,
        clusters=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
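A hedged usage sketch; the 512-entry cluster palette and the sizes below are made up for illustration (real checkpoints ship their own clusters):

rng = np.random.default_rng(0)
processor = ImageGPTImageProcessor(clusters=rng.uniform(-1, 1, (512, 3)), size={"height": 32, "width": 32})
batch = processor(images=np.zeros((64, 64, 3), dtype=np.uint8), return_tensors="np")
print(batch["input_ids"].shape)  # (1, 1024): one palette index per pixel of the resized 32x32 image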
| 83 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(self, sql, con, features=None, cache_dir=None, keep_in_memory=False, **kwargs):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs)

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class SqlDatasetWriter:
    def __init__(self, dataset, name, con, batch_size=None, num_proc=None, **to_sql_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self):
        # "sql" and "con" are handled by this class, so drop them if they were passed along.
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)
        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs):
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows
        return written
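A hedged end-to-end sketch with an in-memory SQLite database (illustrative only; the usual public entry point is Dataset.to_sql):

import sqlite3
from datasets import Dataset
con = sqlite3.connect(":memory:")
ds = Dataset.from_dict({"a": [1, 2, 3]})
SqlDatasetWriter(ds, "my_table", con, batch_size=2).write()
print(con.execute("SELECT COUNT(*) FROM my_table").fetchone())  # (3,)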
| 80 | 0 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING

FlaxAutoModel = auto_class_update(FlaxAutoModel)

class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING

FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")

class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING

FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")

class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING

FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")

class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)

class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)

class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING

FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")

class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)

class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING

FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")

class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING

FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)

class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)

class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING

FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")

class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING

FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
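A hedged usage sketch of the generated auto classes (checkpoint name illustrative):

from transformers import FlaxAutoModel
model = FlaxAutoModel.from_pretrained("bert-base-cased")  # resolves to FlaxBertModel via the "bert" mapping entry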
| 353 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
"""simple docstring"""
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        return model
@property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        return model
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4,
            num_hidden_layers=5, pad_token_id=1, vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)
    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer,
            safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np",
            image=init_image,
        )
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np",
            image=init_image, return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(torch_device)
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer,
            safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt], generator=generator, num_inference_steps=2, output_type="np", image=init_image,
        ).images
        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))
        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator,
            output_type="np",
        )
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )
        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 157 | 0 |
'''simple docstring'''
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
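Illustrative behavior, assuming nltk's punkt data has been downloaded as above:

print(add_newline_to_end_of_each_sentence("Hello world.<n> How are you?"))
# Hello world.
# How are you?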
| 42 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extract some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer-Learned"
            " Distillation"
        )
    )
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()
if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
else:
raise ValueError('args.model_type should be "bert".')
    state_dict = model.state_dict()
    compressed_sd = {}
    # Student-side key names below follow DistilBERT's parameter layout; they are reconstructed
    # from the teacher-side keys and should be checked against the target student architecture.
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1
    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]
print(f'N layers selected for distillation: {std_idx}')
print(f'Number of params transferred for distillation: {len(compressed_sd.keys())}')
print(f'Save transferred checkpoint to {args.dump_checkpoint}.')
torch.save(compressed_sd, args.dump_checkpoint)
| 117 | 0 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    """Wraps a ViT image processor and a CLIP tokenizer into a single CLIPSeg processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 2 | """simple docstring"""
def join(separator: str, separated: list) -> str:
    """Concatenate the given strings with the separator, stripping it from the ends of the result."""
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
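A couple of illustrative checks (the trailing separator is stripped from the result):

assert join("-", ["a", "b", "c"]) == "a-b-c"
assert join(" ", ["You", "are", "amazing!"]) == "You are amazing!"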
if __name__ == "__main__":
from doctest import testmod
testmod()
| 2 | 1 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("align", "EfficientNetImageProcessor"),
("beit", "BeitImageProcessor"),
("bit", "BitImageProcessor"),
("blip", "BlipImageProcessor"),
("blip-2", "BlipImageProcessor"),
("bridgetower", "BridgeTowerImageProcessor"),
("chinese_clip", "ChineseCLIPImageProcessor"),
("clip", "CLIPImageProcessor"),
("clipseg", "ViTImageProcessor"),
("conditional_detr", "ConditionalDetrImageProcessor"),
("convnext", "ConvNextImageProcessor"),
("convnextv2", "ConvNextImageProcessor"),
("cvt", "ConvNextImageProcessor"),
("data2vec-vision", "BeitImageProcessor"),
("deformable_detr", "DeformableDetrImageProcessor"),
("deit", "DeiTImageProcessor"),
("deta", "DetaImageProcessor"),
("detr", "DetrImageProcessor"),
("dinat", "ViTImageProcessor"),
("donut-swin", "DonutImageProcessor"),
("dpt", "DPTImageProcessor"),
("efficientformer", "EfficientFormerImageProcessor"),
("efficientnet", "EfficientNetImageProcessor"),
("flava", "FlavaImageProcessor"),
("focalnet", "BitImageProcessor"),
("git", "CLIPImageProcessor"),
("glpn", "GLPNImageProcessor"),
("groupvit", "CLIPImageProcessor"),
("imagegpt", "ImageGPTImageProcessor"),
("instructblip", "BlipImageProcessor"),
("layoutlmv2", "LayoutLMv2ImageProcessor"),
("layoutlmv3", "LayoutLMv3ImageProcessor"),
("levit", "LevitImageProcessor"),
("mask2former", "Mask2FormerImageProcessor"),
("maskformer", "MaskFormerImageProcessor"),
("mgp-str", "ViTImageProcessor"),
("mobilenet_v1", "MobileNetV1ImageProcessor"),
("mobilenet_v2", "MobileNetV2ImageProcessor"),
        ("mobilevit", "MobileViTImageProcessor"),
("mobilevitv2", "MobileViTImageProcessor"),
("nat", "ViTImageProcessor"),
("oneformer", "OneFormerImageProcessor"),
("owlvit", "OwlViTImageProcessor"),
("perceiver", "PerceiverImageProcessor"),
("pix2struct", "Pix2StructImageProcessor"),
("poolformer", "PoolFormerImageProcessor"),
("regnet", "ConvNextImageProcessor"),
("resnet", "ConvNextImageProcessor"),
("sam", "SamImageProcessor"),
("segformer", "SegformerImageProcessor"),
("swiftformer", "ViTImageProcessor"),
("swin", "ViTImageProcessor"),
("swin2sr", "Swin2SRImageProcessor"),
("swinv2", "ViTImageProcessor"),
("table-transformer", "DetrImageProcessor"),
("timesformer", "VideoMAEImageProcessor"),
("tvlt", "TvltImageProcessor"),
("upernet", "SegformerImageProcessor"),
("van", "ConvNextImageProcessor"),
("videomae", "VideoMAEImageProcessor"),
("vilt", "ViltImageProcessor"),
("vit", "ViTImageProcessor"),
("vit_hybrid", "ViTHybridImageProcessor"),
("vit_mae", "ViTImageProcessor"),
("vit_msn", "ViTImageProcessor"),
("xclip", "CLIPImageProcessor"),
("yolos", "YolosImageProcessor"),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def get_image_processor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}
    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True
        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]
        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )
        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]
        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)
        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )
        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)
        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
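A hedged usage sketch (checkpoint name illustrative):

from transformers import AutoImageProcessor
image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
# resolves to ViTImageProcessor through the "vit" entry in the mapping above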
| 46 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_INIT_CONFIGURATION
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = RealmTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=True , snake_case_="[UNK]" , snake_case_="[SEP]" , snake_case_="[PAD]" , snake_case_="[CLS]" , snake_case_="[MASK]" , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Optional[int]:
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , )
__lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , snake_case_ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , snake_case_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , snake_case_ ) != tokenize_chinese_chars
):
__lowerCAmelCase = getattr(snake_case_ , normalizer_state.pop("""type""" ) )
__lowerCAmelCase = do_lower_case
__lowerCAmelCase = strip_accents
__lowerCAmelCase = tokenize_chinese_chars
__lowerCAmelCase = normalizer_class(**snake_case_ )
__lowerCAmelCase = do_lower_case
def A__ ( self , snake_case_ , **snake_case_ ) -> Tuple:
__lowerCAmelCase = PaddingStrategy.MAX_LENGTH
__lowerCAmelCase = text
__lowerCAmelCase = kwargs.pop("""text_pair""" , snake_case_ )
__lowerCAmelCase = kwargs.pop("""return_tensors""" , snake_case_ )
__lowerCAmelCase = {
"""input_ids""": [],
"""attention_mask""": [],
"""token_type_ids""": [],
}
for idx, candidate_text in enumerate(snake_case_ ):
if batch_text_pair is not None:
__lowerCAmelCase = batch_text_pair[idx]
else:
__lowerCAmelCase = None
__lowerCAmelCase = super().__call__(snake_case_ , snake_case_ , return_tensors=snake_case_ , **snake_case_ )
__lowerCAmelCase = encoded_candidates.get("""input_ids""" )
__lowerCAmelCase = encoded_candidates.get("""attention_mask""" )
__lowerCAmelCase = encoded_candidates.get("""token_type_ids""" )
if encoded_input_ids is not None:
output_data["input_ids"].append(snake_case_ )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(snake_case_ )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(snake_case_ )
__lowerCAmelCase = {key: item for key, item in output_data.items() if len(snake_case_ ) != 0}
return BatchEncoding(snake_case_ , tensor_type=snake_case_ )
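    # Hedged note (added for clarity, not in the source): the method above
    # corresponds to RealmTokenizerFast.batch_encode_candidates — it encodes a
    # batch of candidate lists with MAX_LENGTH padding so every candidate
    # tensor ends up the same shape. Illustrative call (values assumed):
    #
    #   tokenizer.batch_encode_candidates(
    #       [["Hello world!", "Nice to meet you!"],
    #        ["The cute cat.", "The adorable dog."]],
    #       max_length=10,
    #       return_tensors="pt",
    #   )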
def A__ ( self , snake_case_ , snake_case_=None ) -> Optional[int]:
__lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
__lowerCAmelCase = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
| 301 | 0 |
import unittest
import numpy as np
def lowerCAmelCase__ ( a__: np.ndarray , a__: np.ndarray , a__: np.ndarray , a__: np.ndarray | None = None , ) -> np.ndarray:
'''simple docstring'''
_UpperCAmelCase = np.shape(a__ )
_UpperCAmelCase = np.shape(a__ )
_UpperCAmelCase = np.shape(a__ )
if shape_a[0] != shape_b[0]:
_UpperCAmelCase = (
'Expected the same number of rows for A and B. '
F'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(a__ )
if shape_b[1] != shape_c[1]:
_UpperCAmelCase = (
'Expected the same number of columns for B and C. '
F'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(a__ )
_UpperCAmelCase = pseudo_inv
if a_inv is None:
try:
_UpperCAmelCase = np.linalg.inv(a__ )
except np.linalg.LinAlgError:
raise ValueError(
'Input matrix A is not invertible. Cannot compute Schur complement.' )
return mat_c - mat_b.T @ a_inv @ mat_b
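# Hedged usage sketch (added; not part of the source). The function above is the
# Schur complement S = C - B^T A^{-1} B of the block matrix [[A, B], [B^T, C]],
# bound here to the obfuscated name `lowerCAmelCase__` and referred to as
# `schur_complement` by the tests below. It satisfies
# det([[A, B], [B^T, C]]) = det(A) * det(S). Example values are assumptions:
#
#   a = np.array([[1.0, 2.0], [2.0, 5.0]])   # det(a) = 1
#   b = np.array([[1.0], [0.0]])
#   c = np.array([[3.0]])
#   s = schur_complement(a, b, c)            # S = 3 - 5 = -2
#   m = np.block([[a, b], [b.T, c]])
#   assert np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))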
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self ) -> None:
"""simple docstring"""
_UpperCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_UpperCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
_UpperCAmelCase = np.array([[2, 1], [6, 3]] )
_UpperCAmelCase = schur_complement(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = np.block([[a, b], [b.T, c]] )
_UpperCAmelCase = np.linalg.det(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = np.linalg.det(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = np.linalg.det(_SCREAMING_SNAKE_CASE )
self.assertAlmostEqual(_SCREAMING_SNAKE_CASE , det_a * det_s )
def UpperCAmelCase__ ( self ) -> None:
"""simple docstring"""
_UpperCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_UpperCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
_UpperCAmelCase = np.array([[2, 1], [6, 3]] )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
schur_complement(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> None:
"""simple docstring"""
_UpperCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_UpperCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
_UpperCAmelCase = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
schur_complement(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 185 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
lowerCAmelCase__ :Dict = pd.read_csv('''sample_data.csv''', header=None)
    lowerCAmelCase__ :int = df.shape[0]
# If you're using some other dataset input the target column
lowerCAmelCase__ :Union[str, Any] = df.iloc[:, 1:2]
lowerCAmelCase__ :Optional[int] = actual_data.values.reshape(len_data, 1)
lowerCAmelCase__ :Tuple = MinMaxScaler().fit_transform(actual_data)
lowerCAmelCase__ :str = 1_0
lowerCAmelCase__ :Optional[Any] = 5
lowerCAmelCase__ :List[str] = 2_0
lowerCAmelCase__ :Any = len_data - periods * look_back
lowerCAmelCase__ :Union[str, Any] = actual_data[:division]
lowerCAmelCase__ :Tuple = actual_data[division - look_back :]
lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = [], []
lowerCAmelCase__ , lowerCAmelCase__ :str = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
lowerCAmelCase__ :Optional[Any] = np.array(train_x)
lowerCAmelCase__ :Any = np.array(test_x)
lowerCAmelCase__ :Dict = np.array([list(i.ravel()) for i in train_y])
lowerCAmelCase__ :Tuple = np.array([list(i.ravel()) for i in test_y])
lowerCAmelCase__ :Optional[int] = Sequential()
model.add(LSTM(1_2_8, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(6_4, input_shape=(1_2_8, 1)))
model.add(Dense(forward_days))
model.compile(loss='''mean_squared_error''', optimizer='''adam''')
lowerCAmelCase__ :List[Any] = model.fit(
x_train, y_train, epochs=1_5_0, verbose=1, shuffle=True, batch_size=4
)
lowerCAmelCase__ :Optional[Any] = model.predict(x_test)
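    # Hedged follow-up sketch (added; not in the source). With the intended
    # names that the obfuscated assignments above stand in for (`model`,
    # `x_test`, `y_test`), a typical next step is scoring the forecast:
    #
    #   from sklearn.metrics import mean_squared_error
    #
    #   y_pred = model.predict(x_test)        # shape: (n_windows, forward_days)
    #   rmse = mean_squared_error(y_test, y_pred, squared=False)
    #   print(f"test RMSE (min-max scaled units): {rmse:.4f}")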
| 185 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A :
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE=[1, 1, 2, 1] , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE="relu" , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=None , ) -> Dict:
"""simple docstring"""
A : List[str] = parent
A : int = batch_size
A : Optional[int] = image_size
A : int = num_channels
A : List[Any] = embeddings_size
A : str = hidden_sizes
A : Any = depths
A : List[str] = is_training
A : Any = use_labels
A : Any = hidden_act
A : Tuple = num_labels
A : List[str] = scope
A : Union[str, Any] = len(SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : Optional[Any] = None
if self.use_labels:
A : str = ids_tensor([self.batch_size] , self.num_labels )
A : List[Any] = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
A : int = RegNetModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : List[str] = model(SCREAMING_SNAKE_CASE )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
A : str = self.num_labels
A : List[Any] = RegNetForImageClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : Optional[Any] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Tuple = self.prepare_config_and_inputs()
A, A, A : Optional[int] = config_and_inputs
A : Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A ( __snake_case , __snake_case , unittest.TestCase ):
__magic_name__ = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
__magic_name__ = (
{'''feature-extraction''': RegNetModel, '''image-classification''': RegNetForImageClassification}
if is_torch_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Any = RegNetModelTester(self )
A : List[str] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
pass
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A, A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Optional[int] = model_class(SCREAMING_SNAKE_CASE )
A : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Dict = [*signature.parameters.keys()]
A : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A, A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Tuple = model_class(config=SCREAMING_SNAKE_CASE )
for name, module in model.named_modules():
if isinstance(SCREAMING_SNAKE_CASE , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
def check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A : Dict = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
A : Any = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
A : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A : Any = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
A, A : str = self.model_tester.prepare_config_and_inputs_for_common()
A : Dict = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
A : Dict = layer_type
A : Union[str, Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A : Dict = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
@slow
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Dict = RegNetModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( ):
'''simple docstring'''
A : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
@cached_property
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : Optional[Any] = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(SCREAMING_SNAKE_CASE )
A : Dict = self.default_image_processor
A : int = prepare_img()
A : Optional[Any] = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
A : List[str] = model(**SCREAMING_SNAKE_CASE )
# verify the logits
A : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
A : Tuple = torch.tensor([-0.4_180, -1.5_051, -3.4_836] ).to(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 3 |
def A ( _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = len(_lowercase )
SCREAMING_SNAKE_CASE : Any = len(_lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
SCREAMING_SNAKE_CASE : Union[str, Any] = True
for i in range(_lowercase ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
SCREAMING_SNAKE_CASE : List[str] = True
if a[i].islower():
SCREAMING_SNAKE_CASE : Dict = True
return dp[n][m]
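# Hedged note (added): this is the classic "string abbreviation" DP. It decides
# whether string `a` can be turned into the all-uppercase string `b` by
# capitalizing some of `a`'s lowercase letters and deleting the remaining
# lowercase ones; dp[i][j] records whether a[:i] can yield b[:j]. Illustrative
# calls (assumed, using the obfuscated function name `A` defined above):
#
#   A("daBcd", "ABC")   # True:  a -> A, keep B, c -> C, delete both d's
#   A("dBcd", "ABC")    # False: there is no 'a' to capitalize into 'A'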
if __name__ == "__main__":
import doctest
doctest.testmod()
| 182 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
A: int = logging.get_logger(__name__)
A: Any = {"vocab_file": "vocab.txt"}
A: Optional[int] = {
"vocab_file": {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
}
}
A: Optional[int] = {
"YituTech/conv-bert-base": 5_1_2,
"YituTech/conv-bert-medium-small": 5_1_2,
"YituTech/conv-bert-small": 5_1_2,
}
A: int = {
"YituTech/conv-bert-base": {"do_lower_case": True},
"YituTech/conv-bert-medium-small": {"do_lower_case": True},
"YituTech/conv-bert-small": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES
__lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : List[str] = PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : int = ConvBertTokenizer
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="[UNK]" , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="[PAD]" , _SCREAMING_SNAKE_CASE="[CLS]" , _SCREAMING_SNAKE_CASE="[MASK]" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , tokenize_chinese_chars=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , _SCREAMING_SNAKE_CASE ) != do_lower_case
or normalizer_state.get("""strip_accents""" , _SCREAMING_SNAKE_CASE ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , _SCREAMING_SNAKE_CASE ) != tokenize_chinese_chars
):
UpperCAmelCase : Dict = getattr(_SCREAMING_SNAKE_CASE , normalizer_state.pop("""type""" ) )
UpperCAmelCase : str = do_lower_case
UpperCAmelCase : Optional[int] = strip_accents
UpperCAmelCase : List[str] = tokenize_chinese_chars
UpperCAmelCase : Dict = normalizer_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = do_lower_case
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
UpperCAmelCase : str = [self.sep_token_id]
UpperCAmelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
'''simple docstring'''
UpperCAmelCase : Dict = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
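# Hedged usage sketch (added; not in the source). The class above corresponds to
# transformers' ConvBertTokenizerFast; usage mirrors any fast BERT-style
# tokenizer, with the checkpoint name taken from the maps above:
#
#   from transformers import ConvBertTokenizerFast
#
#   tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   enc = tok("Hello world!", return_tensors="pt")
#   print(enc["input_ids"].shape)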
| 76 |
"""simple docstring"""
def _snake_case ( UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : Tuple=False ):
if isinstance(UpperCamelCase , UpperCamelCase ) and isinstance(UpperCamelCase , UpperCamelCase ):
UpperCAmelCase : List[str] = len(set_a.intersection(UpperCamelCase ) )
if alternative_union:
UpperCAmelCase : int = len(UpperCamelCase ) + len(UpperCamelCase )
else:
UpperCAmelCase : Any = len(set_a.union(UpperCamelCase ) )
return intersection / union
if isinstance(UpperCamelCase , (list, tuple) ) and isinstance(UpperCamelCase , (list, tuple) ):
UpperCAmelCase : Any = [element for element in set_a if element in set_b]
if alternative_union:
UpperCAmelCase : Optional[int] = len(UpperCamelCase ) + len(UpperCamelCase )
return len(UpperCamelCase ) / union
else:
UpperCAmelCase : Tuple = set_a + [element for element in set_b if element not in set_a]
return len(UpperCamelCase ) / len(UpperCamelCase )
return len(UpperCamelCase ) / len(UpperCamelCase )
return None
if __name__ == "__main__":
A: List[Any] = {"a", "b", "c", "d", "e"}
A: List[Any] = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
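    # Hedged note (added): with the intended variable bindings, |A ∩ B| = 3
    # ({"c", "d", "e"}) and |A ∪ B| = 8, so the call above prints 0.375.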
| 76 | 1 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_=2 , lowerCAmelCase_=3 , lowerCAmelCase_=64 , lowerCAmelCase_=None ) -> List[Any]:
_A = np.random.default_rng(lowerCAmelCase_ )
_A = length
_A = rng.normal(size=(length,) ).astype(np.floataa )
_A = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self ) -> List[str]:
return self.length
def __getitem__( self , lowerCAmelCase_ ) -> Tuple:
return {"x": self.x[i], "y": self.y[i]}
class a ( torch.nn.Module ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_=0 , lowerCAmelCase_=0 , lowerCAmelCase_=False ) -> Union[str, Any]:
super().__init__()
_A = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
_A = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
_A = True
def UpperCAmelCase ( self , lowerCAmelCase_=None ) -> Optional[Any]:
if self.first_batch:
print(F'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
_A = False
return x * self.a[0] + self.b[0]
class a ( torch.nn.Module ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_=0 , lowerCAmelCase_=0 , lowerCAmelCase_=False ) -> List[Any]:
super().__init__()
_A = torch.nn.Parameter(torch.tensor(lowerCAmelCase_ ).float() )
_A = torch.nn.Parameter(torch.tensor(lowerCAmelCase_ ).float() )
_A = True
def UpperCAmelCase ( self , lowerCAmelCase_=None ) -> List[Any]:
if self.first_batch:
print(F'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
_A = False
return x * self.a + self.b
def snake_case ( snake_case__ :Tuple , snake_case__ :int = 16) -> Optional[int]:
from datasets import load_dataset
from transformers import AutoTokenizer
_A = AutoTokenizer.from_pretrained("""bert-base-cased""")
_A = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
_A = load_dataset("""csv""" , data_files=snake_case__)
_A = datasets["""train"""].unique("""label""")
_A = {v: i for i, v in enumerate(snake_case__)}
def tokenize_function(snake_case__ :List[str]):
# max_length=None => use the model max length (it's actually the default)
_A = tokenizer(
examples["""sentence1"""] , examples["""sentence2"""] , truncation=snake_case__ , max_length=snake_case__ , padding="""max_length""")
if "label" in examples:
_A = [label_to_id[l] for l in examples["""label"""]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_A = datasets.map(
snake_case__ , batched=snake_case__ , remove_columns=["""sentence1""", """sentence2""", """label"""] , )
def collate_fn(snake_case__ :str):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""")
return tokenizer.pad(snake_case__ , padding="""longest""" , return_tensors="""pt""")
# Instantiate dataloaders.
_A = DataLoader(tokenized_datasets["""train"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=2)
_A = DataLoader(tokenized_datasets["""validation"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=1)
return train_dataloader, eval_dataloader
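# Hedged usage sketch (added; not in the source). The helpers above mirror
# accelerate's test utilities: a synthetic y = a*x + b dataset and two tiny
# regression models whose forward pass prints its dtypes on the first batch.
# With the intended (de-obfuscated) class names, wiring them up looks like:
#
#   ds = RegressionDataset(a=2, b=3, length=64, seed=0)
#   dl = DataLoader(ds, batch_size=8)
#   model = RegressionModel(a=0.0, b=0.0)
#   batch = next(iter(dl))
#   pred = model(batch["x"])    # first call also prints model/input dtypes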
| 180 |
import torch
from torch import nn
class a ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1 , lowerCAmelCase_=False ) -> Any:
super().__init__()
_A = n_token
_A = d_embed
_A = d_proj
_A = cutoffs + [n_token]
_A = [0] + self.cutoffs
_A = div_val
_A = self.cutoffs[0]
_A = len(self.cutoffs ) - 1
_A = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
_A = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
_A = nn.Parameter(torch.zeros(self.n_clusters ) )
_A = nn.ModuleList()
_A = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCAmelCase_ , lowerCAmelCase_ ) ) )
else:
self.out_projs.append(lowerCAmelCase_ )
self.out_layers.append(nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ ) )
else:
for i in range(len(self.cutoffs ) ):
_A , _A = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_A = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCAmelCase_ , lowerCAmelCase_ ) ) )
self.out_layers.append(nn.Linear(lowerCAmelCase_ , r_idx - l_idx ) )
_A = keep_order
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
if proj is None:
_A = nn.functional.linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
_A = nn.functional.linear(lowerCAmelCase_ , proj.t().contiguous() )
_A = nn.functional.linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=False ) -> List[Any]:
if labels is not None:
# Shift so that tokens < n predict n
_A = hidden[..., :-1, :].contiguous()
_A = labels[..., 1:].contiguous()
_A = hidden.view(-1 , hidden.size(-1 ) )
_A = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("""Input and labels should have the same size in the batch dimension.""" )
else:
_A = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
_A = self._compute_logit(lowerCAmelCase_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
_A = labels != -1_00
_A = torch.zeros_like(lowerCAmelCase_ , dtype=hidden.dtype , device=hidden.device )
_A = (
-nn.functional.log_softmax(lowerCAmelCase_ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
_A = nn.functional.log_softmax(lowerCAmelCase_ , dim=-1 )
else:
# construct weights and biases
_A , _A = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
_A , _A = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_A = self.out_layers[0].weight[l_idx:r_idx]
_A = self.out_layers[0].bias[l_idx:r_idx]
else:
_A = self.out_layers[i].weight
_A = self.out_layers[i].bias
if i == 0:
_A = torch.cat([weight_i, self.cluster_weight] , dim=0 )
_A = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(lowerCAmelCase_ )
biases.append(lowerCAmelCase_ )
_A , _A , _A = weights[0], biases[0], self.out_projs[0]
_A = self._compute_logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_A = nn.functional.log_softmax(lowerCAmelCase_ , dim=1 )
if labels is None:
_A = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
_A = torch.zeros_like(lowerCAmelCase_ , dtype=hidden.dtype , device=hidden.device )
_A = 0
_A = [0] + self.cutoffs
for i in range(len(lowerCAmelCase_ ) - 1 ):
_A , _A = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
_A = (labels >= l_idx) & (labels < r_idx)
_A = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
_A = labels.index_select(0 , lowerCAmelCase_ ) - l_idx
_A = head_logprob.index_select(0 , lowerCAmelCase_ )
_A = hidden.index_select(0 , lowerCAmelCase_ )
else:
_A = hidden
if i == 0:
if labels is not None:
_A = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
_A = head_logprob[:, : self.cutoffs[0]]
else:
_A , _A , _A = weights[i], biases[i], self.out_projs[i]
_A = self._compute_logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_A = nn.functional.log_softmax(lowerCAmelCase_ , dim=1 )
_A = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
_A = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
_A = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
_A = logprob_i
if labels is not None:
if (hasattr(self , """keep_order""" ) and self.keep_order) or keep_order:
out.index_copy_(0 , lowerCAmelCase_ , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int:
if self.n_clusters == 0:
_A = self._compute_logit(lowerCAmelCase_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(lowerCAmelCase_ , dim=-1 )
else:
# construct weights and biases
_A , _A = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
_A , _A = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_A = self.out_layers[0].weight[l_idx:r_idx]
_A = self.out_layers[0].bias[l_idx:r_idx]
else:
_A = self.out_layers[i].weight
_A = self.out_layers[i].bias
if i == 0:
_A = torch.cat([weight_i, self.cluster_weight] , dim=0 )
_A = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(lowerCAmelCase_ )
biases.append(lowerCAmelCase_ )
_A , _A , _A = weights[0], biases[0], self.out_projs[0]
_A = self._compute_logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_A = hidden.new_empty((head_logit.size(0 ), self.n_token) )
_A = nn.functional.log_softmax(lowerCAmelCase_ , dim=1 )
_A = [0] + self.cutoffs
for i in range(len(lowerCAmelCase_ ) - 1 ):
_A , _A = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
_A = head_logprob[:, : self.cutoffs[0]]
else:
_A , _A , _A = weights[i], biases[i], self.out_projs[i]
_A = self._compute_logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_A = nn.functional.log_softmax(lowerCAmelCase_ , dim=1 )
_A = head_logprob[:, -i] + tail_logprob_i
_A = logprob_i
return out
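# Hedged usage sketch (added; not in the source). The class above is
# Transformer-XL's projected adaptive log-softmax; its constructor arguments,
# in order, are n_token, d_embed, d_proj, cutoffs, div_val and keep_order.
# With the intended class name, illustrative values (assumed) look like:
#
#   crit = ProjectedAdaptiveLogSoftmax(
#       n_token=10_000, d_embed=256, d_proj=256, cutoffs=[1_000, 4_000], div_val=4
#   )
#   hidden = torch.randn(8, 32, 256)            # (batch, seq_len, d_proj)
#   labels = torch.randint(0, 10_000, (8, 32))
#   nll = crit(hidden, labels)                  # per-token negative log-likelihood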
| 180 | 1 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__UpperCAmelCase = get_tests_dir('fixtures/dummy-config.json')
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :int = 0
def __lowerCAmelCase ( self ) -> List[str]:
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""bert-base-uncased""" )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :int = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :int = AutoConfig.for_model("""roberta""" )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
lowerCAmelCase_ :int = os.path.join(__A , """fake-roberta""" )
os.makedirs(__A , exist_ok=__A )
with open(os.path.join(__A , """config.json""" ) , """w""" ) as f:
f.write(json.dumps({} ) )
lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A )
self.assertEqual(type(__A ) , __A )
def __lowerCAmelCase ( self ) -> Optional[int]:
try:
AutoConfig.register("""custom""" , __A )
# Wrong model type will raise an error
with self.assertRaises(__A ):
AutoConfig.register("""model""" , __A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__A ):
AutoConfig.register("""bert""" , __A )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCAmelCase_ :Union[str, Any] = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A )
lowerCAmelCase_ :Optional[int] = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def __lowerCAmelCase ( self ) -> Tuple:
with self.assertRaisesRegex(
__A , """bert-base is not a local folder and is not a valid model identifier""" ):
lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""bert-base""" )
def __lowerCAmelCase ( self ) -> Any:
with self.assertRaisesRegex(
__A , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , revision="""aaaaaa""" )
def __lowerCAmelCase ( self ) -> int:
with self.assertRaisesRegex(
__A , """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" , ):
lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )
def __lowerCAmelCase ( self ) -> Tuple:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__A ):
lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__A ):
lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
lowerCAmelCase_ :str = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A )
lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , trust_remote_code=__A )
self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""" )
def __lowerCAmelCase ( self ) -> int:
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :int = "new-model"
try:
AutoConfig.register("""new-model""" , __A )
# If remote code is not set, the default is to use local
lowerCAmelCase_ :Any = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote code is disabled, we load the local one.
lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote is enabled, we load from the Hub
lowerCAmelCase_ :Optional[Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
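# Hedged usage sketch (added; not in the source). The registration pattern the
# tests above exercise, in miniature:
#
#   from transformers import AutoConfig, PretrainedConfig
#
#   class MyConfig(PretrainedConfig):
#       model_type = "my-model"
#
#   AutoConfig.register("my-model", MyConfig)
#   cfg = AutoConfig.for_model("my-model")
#   assert isinstance(cfg, MyConfig)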
| 368 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['DeiTFeatureExtractor']
__UpperCAmelCase = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
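# Hedged note (added): with the _LazyModule pattern above, importing the package
# stays cheap — heavy submodules load on first attribute access, e.g.
#
#   from transformers import DeiTModel   # resolved lazily; torch loads only now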
| 1 | 0 |